You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

813 lines
20 KiB

11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
10 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
11 years ago
11 years ago
10 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
10 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
  1. // Modified for Tendermint
  2. // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
  3. // https://github.com/conformal/btcd/blob/master/LICENSE
  4. package p2p
  5. import (
  6. "encoding/binary"
  7. "encoding/json"
  8. "math"
  9. "math/rand"
  10. "net"
  11. "os"
  12. "sync"
  13. "time"
  14. . "github.com/tendermint/tendermint/common"
  15. )
  16. const (
  17. // addresses under which the address manager will claim to need more addresses.
  18. needAddressThreshold = 1000
  19. // interval used to dump the address cache to disk for future use.
  20. dumpAddressInterval = time.Minute * 2
  21. // max addresses in each old address bucket.
  22. oldBucketSize = 64
  23. // buckets we split old addresses over.
  24. oldBucketCount = 64
  25. // max addresses in each new address bucket.
  26. newBucketSize = 64
  27. // buckets that we spread new addresses over.
  28. newBucketCount = 256
  29. // old buckets over which an address group will be spread.
  30. oldBucketsPerGroup = 4
  31. // new buckets over which an source address group will be spread.
  32. newBucketsPerGroup = 32
  33. // buckets a frequently seen new address may end up in.
  34. maxNewBucketsPerAddress = 4
  35. // days before which we assume an address has vanished
  36. // if we have not seen it announced in that long.
  37. numMissingDays = 30
  38. // tries without a single success before we assume an address is bad.
  39. numRetries = 3
  40. // max failures we will accept without a success before considering an address bad.
  41. maxFailures = 10
  42. // days since the last success before we will consider evicting an address.
  43. minBadDays = 7
  44. // % of total addresses known returned by GetSelection.
  45. getSelectionPercent = 23
  46. // min addresses that must be returned by GetSelection. Useful for bootstrapping.
  47. minGetSelection = 32
  48. // max addresses returned by GetSelection
  49. maxGetSelection = 2500
  50. // current version of the on-disk format.
  51. serializationVersion = 1
  52. )
  53. /* AddrBook - concurrency safe peer address manager */
  54. type AddrBook struct {
  55. QuitService
  56. mtx sync.Mutex
  57. filePath string
  58. rand *rand.Rand
  59. key string
  60. ourAddrs map[string]*NetAddress
  61. addrLookup map[string]*knownAddress // new & old
  62. addrNew []map[string]*knownAddress
  63. addrOld []map[string]*knownAddress
  64. wg sync.WaitGroup
  65. nOld int
  66. nNew int
  67. }
  68. const (
  69. bucketTypeNew = 0x01
  70. bucketTypeOld = 0x02
  71. )
  72. // Use Start to begin processing asynchronous address updates.
  73. func NewAddrBook(filePath string) *AddrBook {
  74. am := &AddrBook{
  75. rand: rand.New(rand.NewSource(time.Now().UnixNano())),
  76. ourAddrs: make(map[string]*NetAddress),
  77. addrLookup: make(map[string]*knownAddress),
  78. filePath: filePath,
  79. }
  80. am.init()
  81. am.QuitService = *NewQuitService(log, "AddrBook", am)
  82. return am
  83. }
  84. // When modifying this, don't forget to update loadFromFile()
  85. func (a *AddrBook) init() {
  86. a.key = CRandHex(24) // 24/2 * 8 = 96 bits
  87. // New addr buckets
  88. a.addrNew = make([]map[string]*knownAddress, newBucketCount)
  89. for i := range a.addrNew {
  90. a.addrNew[i] = make(map[string]*knownAddress)
  91. }
  92. // Old addr buckets
  93. a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
  94. for i := range a.addrOld {
  95. a.addrOld[i] = make(map[string]*knownAddress)
  96. }
  97. }
  98. func (a *AddrBook) OnStart() error {
  99. a.QuitService.OnStart()
  100. a.loadFromFile(a.filePath)
  101. a.wg.Add(1)
  102. go a.saveRoutine()
  103. return nil
  104. }
  105. func (a *AddrBook) OnStop() {
  106. a.QuitService.OnStop()
  107. a.wg.Wait()
  108. }
  109. func (a *AddrBook) AddOurAddress(addr *NetAddress) {
  110. a.mtx.Lock()
  111. defer a.mtx.Unlock()
  112. log.Info("Add our address to book", "addr", addr)
  113. a.ourAddrs[addr.String()] = addr
  114. }
  115. func (a *AddrBook) OurAddresses() []*NetAddress {
  116. addrs := []*NetAddress{}
  117. for _, addr := range a.ourAddrs {
  118. addrs = append(addrs, addr)
  119. }
  120. return addrs
  121. }
  122. func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
  123. a.mtx.Lock()
  124. defer a.mtx.Unlock()
  125. log.Info("Add address to book", "addr", addr, "src", src)
  126. a.addAddress(addr, src)
  127. }
  128. func (a *AddrBook) NeedMoreAddrs() bool {
  129. return a.Size() < needAddressThreshold
  130. }
  131. func (a *AddrBook) Size() int {
  132. a.mtx.Lock()
  133. defer a.mtx.Unlock()
  134. return a.size()
  135. }
  136. func (a *AddrBook) size() int {
  137. return a.nNew + a.nOld
  138. }
  139. // Pick an address to connect to with new/old bias.
  140. func (a *AddrBook) PickAddress(newBias int) *NetAddress {
  141. a.mtx.Lock()
  142. defer a.mtx.Unlock()
  143. if a.size() == 0 {
  144. return nil
  145. }
  146. if newBias > 100 {
  147. newBias = 100
  148. }
  149. if newBias < 0 {
  150. newBias = 0
  151. }
  152. // Bias between new and old addresses.
  153. oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
  154. newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
  155. if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
  156. // pick random Old bucket.
  157. var bucket map[string]*knownAddress = nil
  158. for len(bucket) == 0 {
  159. bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
  160. }
  161. // pick a random ka from bucket.
  162. randIndex := a.rand.Intn(len(bucket))
  163. for _, ka := range bucket {
  164. if randIndex == 0 {
  165. return ka.Addr
  166. }
  167. randIndex--
  168. }
  169. PanicSanity("Should not happen")
  170. } else {
  171. // pick random New bucket.
  172. var bucket map[string]*knownAddress = nil
  173. for len(bucket) == 0 {
  174. bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
  175. }
  176. // pick a random ka from bucket.
  177. randIndex := a.rand.Intn(len(bucket))
  178. for _, ka := range bucket {
  179. if randIndex == 0 {
  180. return ka.Addr
  181. }
  182. randIndex--
  183. }
  184. PanicSanity("Should not happen")
  185. }
  186. return nil
  187. }
  188. func (a *AddrBook) MarkGood(addr *NetAddress) {
  189. a.mtx.Lock()
  190. defer a.mtx.Unlock()
  191. ka := a.addrLookup[addr.String()]
  192. if ka == nil {
  193. return
  194. }
  195. ka.markGood()
  196. if ka.isNew() {
  197. a.moveToOld(ka)
  198. }
  199. }
  200. func (a *AddrBook) MarkAttempt(addr *NetAddress) {
  201. a.mtx.Lock()
  202. defer a.mtx.Unlock()
  203. ka := a.addrLookup[addr.String()]
  204. if ka == nil {
  205. return
  206. }
  207. ka.markAttempt()
  208. }
  209. func (a *AddrBook) MarkBad(addr *NetAddress) {
  210. a.mtx.Lock()
  211. defer a.mtx.Unlock()
  212. ka := a.addrLookup[addr.String()]
  213. if ka == nil {
  214. return
  215. }
  216. // We currently just eject the address.
  217. // In the future, consider blacklisting.
  218. a.removeFromAllBuckets(ka)
  219. }
  220. /* Peer exchange */
  221. // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
  222. func (a *AddrBook) GetSelection() []*NetAddress {
  223. a.mtx.Lock()
  224. defer a.mtx.Unlock()
  225. if a.size() == 0 {
  226. return nil
  227. }
  228. allAddr := make([]*NetAddress, a.size())
  229. i := 0
  230. for _, v := range a.addrLookup {
  231. allAddr[i] = v.Addr
  232. i++
  233. }
  234. numAddresses := MaxInt(
  235. MinInt(minGetSelection, len(allAddr)),
  236. len(allAddr)*getSelectionPercent/100)
  237. numAddresses = MinInt(maxGetSelection, numAddresses)
  238. // Fisher-Yates shuffle the array. We only need to do the first
  239. // `numAddresses' since we are throwing the rest.
  240. for i := 0; i < numAddresses; i++ {
  241. // pick a number between current index and the end
  242. j := rand.Intn(len(allAddr)-i) + i
  243. allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
  244. }
  245. // slice off the limit we are willing to share.
  246. return allAddr[:numAddresses]
  247. }
  248. /* Loading & Saving */
  249. type addrBookJSON struct {
  250. Key string
  251. Addrs []*knownAddress
  252. }
  253. func (a *AddrBook) saveToFile(filePath string) {
  254. // Compile Addrs
  255. addrs := []*knownAddress{}
  256. for _, ka := range a.addrLookup {
  257. addrs = append(addrs, ka)
  258. }
  259. aJSON := &addrBookJSON{
  260. Key: a.key,
  261. Addrs: addrs,
  262. }
  263. jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
  264. if err != nil {
  265. log.Error("Failed to save AddrBook to file", "err", err)
  266. return
  267. }
  268. err = WriteFileAtomic(filePath, jsonBytes)
  269. if err != nil {
  270. log.Error("Failed to save AddrBook to file", "file", filePath, "error", err)
  271. }
  272. }
  273. // Returns false if file does not exist.
  274. // Panics if file is corrupt.
  275. func (a *AddrBook) loadFromFile(filePath string) bool {
  276. // If doesn't exist, do nothing.
  277. _, err := os.Stat(filePath)
  278. if os.IsNotExist(err) {
  279. return false
  280. }
  281. // Load addrBookJSON{}
  282. r, err := os.Open(filePath)
  283. if err != nil {
  284. PanicCrisis(Fmt("Error opening file %s: %v", filePath, err))
  285. }
  286. defer r.Close()
  287. aJSON := &addrBookJSON{}
  288. dec := json.NewDecoder(r)
  289. err = dec.Decode(aJSON)
  290. if err != nil {
  291. PanicCrisis(Fmt("Error reading file %s: %v", filePath, err))
  292. }
  293. // Restore all the fields...
  294. // Restore the key
  295. a.key = aJSON.Key
  296. // Restore .addrNew & .addrOld
  297. for _, ka := range aJSON.Addrs {
  298. for _, bucketIndex := range ka.Buckets {
  299. bucket := a.getBucket(ka.BucketType, bucketIndex)
  300. bucket[ka.Addr.String()] = ka
  301. }
  302. a.addrLookup[ka.Addr.String()] = ka
  303. if ka.BucketType == bucketTypeNew {
  304. a.nNew++
  305. } else {
  306. a.nOld++
  307. }
  308. }
  309. return true
  310. }
  311. /* Private methods */
  312. func (a *AddrBook) saveRoutine() {
  313. dumpAddressTicker := time.NewTicker(dumpAddressInterval)
  314. out:
  315. for {
  316. select {
  317. case <-dumpAddressTicker.C:
  318. log.Info("Saving AddrBook to file", "size", a.Size())
  319. a.saveToFile(a.filePath)
  320. case <-a.Quit:
  321. break out
  322. }
  323. }
  324. dumpAddressTicker.Stop()
  325. a.saveToFile(a.filePath)
  326. a.wg.Done()
  327. log.Notice("Address handler done")
  328. }
  329. func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
  330. switch bucketType {
  331. case bucketTypeNew:
  332. return a.addrNew[bucketIdx]
  333. case bucketTypeOld:
  334. return a.addrOld[bucketIdx]
  335. default:
  336. PanicSanity("Should not happen")
  337. return nil
  338. }
  339. }
  340. // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
  341. // NOTE: currently it always returns true.
  342. func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
  343. // Sanity check
  344. if ka.isOld() {
  345. log.Warn(Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
  346. return false
  347. }
  348. addrStr := ka.Addr.String()
  349. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  350. // Already exists?
  351. if _, ok := bucket[addrStr]; ok {
  352. return true
  353. }
  354. // Enforce max addresses.
  355. if len(bucket) > newBucketSize {
  356. log.Notice("new bucket is full, expiring old ")
  357. a.expireNew(bucketIdx)
  358. }
  359. // Add to bucket.
  360. bucket[addrStr] = ka
  361. if ka.addBucketRef(bucketIdx) == 1 {
  362. a.nNew++
  363. }
  364. // Ensure in addrLookup
  365. a.addrLookup[addrStr] = ka
  366. return true
  367. }
  368. // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
  369. func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
  370. // Sanity check
  371. if ka.isNew() {
  372. log.Warn(Fmt("Cannot add new address to old bucket: %v", ka))
  373. return false
  374. }
  375. if len(ka.Buckets) != 0 {
  376. log.Warn(Fmt("Cannot add already old address to another old bucket: %v", ka))
  377. return false
  378. }
  379. addrStr := ka.Addr.String()
  380. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  381. // Already exists?
  382. if _, ok := bucket[addrStr]; ok {
  383. return true
  384. }
  385. // Enforce max addresses.
  386. if len(bucket) > oldBucketSize {
  387. return false
  388. }
  389. // Add to bucket.
  390. bucket[addrStr] = ka
  391. if ka.addBucketRef(bucketIdx) == 1 {
  392. a.nOld++
  393. }
  394. // Ensure in addrLookup
  395. a.addrLookup[addrStr] = ka
  396. return true
  397. }
  398. func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
  399. if ka.BucketType != bucketType {
  400. log.Warn(Fmt("Bucket type mismatch: %v", ka))
  401. return
  402. }
  403. bucket := a.getBucket(bucketType, bucketIdx)
  404. delete(bucket, ka.Addr.String())
  405. if ka.removeBucketRef(bucketIdx) == 0 {
  406. if bucketType == bucketTypeNew {
  407. a.nNew--
  408. } else {
  409. a.nOld--
  410. }
  411. delete(a.addrLookup, ka.Addr.String())
  412. }
  413. }
  414. func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
  415. for _, bucketIdx := range ka.Buckets {
  416. bucket := a.getBucket(ka.BucketType, bucketIdx)
  417. delete(bucket, ka.Addr.String())
  418. }
  419. ka.Buckets = nil
  420. if ka.BucketType == bucketTypeNew {
  421. a.nNew--
  422. } else {
  423. a.nOld--
  424. }
  425. delete(a.addrLookup, ka.Addr.String())
  426. }
  427. func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
  428. bucket := a.getBucket(bucketType, bucketIdx)
  429. var oldest *knownAddress
  430. for _, ka := range bucket {
  431. if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
  432. oldest = ka
  433. }
  434. }
  435. return oldest
  436. }
  437. func (a *AddrBook) addAddress(addr, src *NetAddress) {
  438. if !addr.Routable() {
  439. log.Warn(Fmt("Cannot add non-routable address %v", addr))
  440. return
  441. }
  442. if _, ok := a.ourAddrs[addr.String()]; ok {
  443. // Ignore our own listener address.
  444. return
  445. }
  446. ka := a.addrLookup[addr.String()]
  447. if ka != nil {
  448. // Already old.
  449. if ka.isOld() {
  450. return
  451. }
  452. // Already in max new buckets.
  453. if len(ka.Buckets) == maxNewBucketsPerAddress {
  454. return
  455. }
  456. // The more entries we have, the less likely we are to add more.
  457. factor := int32(2 * len(ka.Buckets))
  458. if a.rand.Int31n(factor) != 0 {
  459. return
  460. }
  461. } else {
  462. ka = newKnownAddress(addr, src)
  463. }
  464. bucket := a.calcNewBucket(addr, src)
  465. a.addToNewBucket(ka, bucket)
  466. log.Notice("Added new address", "address", addr, "total", a.size())
  467. }
  468. // Make space in the new buckets by expiring the really bad entries.
  469. // If no bad entries are available we remove the oldest.
  470. func (a *AddrBook) expireNew(bucketIdx int) {
  471. for addrStr, ka := range a.addrNew[bucketIdx] {
  472. // If an entry is bad, throw it away
  473. if ka.isBad() {
  474. log.Notice(Fmt("expiring bad address %v", addrStr))
  475. a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
  476. return
  477. }
  478. }
  479. // If we haven't thrown out a bad entry, throw out the oldest entry
  480. oldest := a.pickOldest(bucketTypeNew, bucketIdx)
  481. a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
  482. }
  483. // Promotes an address from new to old.
  484. // TODO: Move to old probabilistically.
  485. // The better a node is, the less likely it should be evicted from an old bucket.
  486. func (a *AddrBook) moveToOld(ka *knownAddress) {
  487. // Sanity check
  488. if ka.isOld() {
  489. log.Warn(Fmt("Cannot promote address that is already old %v", ka))
  490. return
  491. }
  492. if len(ka.Buckets) == 0 {
  493. log.Warn(Fmt("Cannot promote address that isn't in any new buckets %v", ka))
  494. return
  495. }
  496. // Remember one of the buckets in which ka is in.
  497. freedBucket := ka.Buckets[0]
  498. // Remove from all (new) buckets.
  499. a.removeFromAllBuckets(ka)
  500. // It's officially old now.
  501. ka.BucketType = bucketTypeOld
  502. // Try to add it to its oldBucket destination.
  503. oldBucketIdx := a.calcOldBucket(ka.Addr)
  504. added := a.addToOldBucket(ka, oldBucketIdx)
  505. if !added {
  506. // No room, must evict something
  507. oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
  508. a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
  509. // Find new bucket to put oldest in
  510. newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
  511. added := a.addToNewBucket(oldest, newBucketIdx)
  512. // No space in newBucket either, just put it in freedBucket from above.
  513. if !added {
  514. added := a.addToNewBucket(oldest, freedBucket)
  515. if !added {
  516. log.Warn(Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
  517. }
  518. }
  519. // Finally, add to bucket again.
  520. added = a.addToOldBucket(ka, oldBucketIdx)
  521. if !added {
  522. log.Warn(Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
  523. }
  524. }
  525. }
  526. // doublesha256( key + sourcegroup +
  527. // int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
  528. func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
  529. data1 := []byte{}
  530. data1 = append(data1, []byte(a.key)...)
  531. data1 = append(data1, []byte(groupKey(addr))...)
  532. data1 = append(data1, []byte(groupKey(src))...)
  533. hash1 := doubleSha256(data1)
  534. hash64 := binary.BigEndian.Uint64(hash1)
  535. hash64 %= newBucketsPerGroup
  536. var hashbuf [8]byte
  537. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  538. data2 := []byte{}
  539. data2 = append(data2, []byte(a.key)...)
  540. data2 = append(data2, groupKey(src)...)
  541. data2 = append(data2, hashbuf[:]...)
  542. hash2 := doubleSha256(data2)
  543. return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
  544. }
  545. // doublesha256( key + group +
  546. // int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
  547. func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
  548. data1 := []byte{}
  549. data1 = append(data1, []byte(a.key)...)
  550. data1 = append(data1, []byte(addr.String())...)
  551. hash1 := doubleSha256(data1)
  552. hash64 := binary.BigEndian.Uint64(hash1)
  553. hash64 %= oldBucketsPerGroup
  554. var hashbuf [8]byte
  555. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  556. data2 := []byte{}
  557. data2 = append(data2, []byte(a.key)...)
  558. data2 = append(data2, groupKey(addr)...)
  559. data2 = append(data2, hashbuf[:]...)
  560. hash2 := doubleSha256(data2)
  561. return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
  562. }
  563. // Return a string representing the network group of this address.
  564. // This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string
  565. // "local" for a local address and the string "unroutable for an unroutable
  566. // address.
  567. func groupKey(na *NetAddress) string {
  568. if na.Local() {
  569. return "local"
  570. }
  571. if !na.Routable() {
  572. return "unroutable"
  573. }
  574. if ipv4 := na.IP.To4(); ipv4 != nil {
  575. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
  576. }
  577. if na.RFC6145() || na.RFC6052() {
  578. // last four bytes are the ip address
  579. ip := net.IP(na.IP[12:16])
  580. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  581. }
  582. if na.RFC3964() {
  583. ip := net.IP(na.IP[2:7])
  584. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  585. }
  586. if na.RFC4380() {
  587. // teredo tunnels have the last 4 bytes as the v4 address XOR
  588. // 0xff.
  589. ip := net.IP(make([]byte, 4))
  590. for i, byte := range na.IP[12:16] {
  591. ip[i] = byte ^ 0xff
  592. }
  593. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  594. }
  595. // OK, so now we know ourselves to be a IPv6 address.
  596. // bitcoind uses /32 for everything, except for Hurricane Electric's
  597. // (he.net) IP range, which it uses /36 for.
  598. bits := 32
  599. heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
  600. Mask: net.CIDRMask(32, 128)}
  601. if heNet.Contains(na.IP) {
  602. bits = 36
  603. }
  604. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
  605. }
  606. //-----------------------------------------------------------------------------
  607. /*
  608. knownAddress
  609. tracks information about a known network address that is used
  610. to determine how viable an address is.
  611. */
  612. type knownAddress struct {
  613. Addr *NetAddress
  614. Src *NetAddress
  615. Attempts int32
  616. LastAttempt time.Time
  617. LastSuccess time.Time
  618. BucketType byte
  619. Buckets []int
  620. }
  621. func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
  622. return &knownAddress{
  623. Addr: addr,
  624. Src: src,
  625. Attempts: 0,
  626. LastAttempt: time.Now(),
  627. BucketType: bucketTypeNew,
  628. Buckets: nil,
  629. }
  630. }
  631. func (ka *knownAddress) isOld() bool {
  632. return ka.BucketType == bucketTypeOld
  633. }
  634. func (ka *knownAddress) isNew() bool {
  635. return ka.BucketType == bucketTypeNew
  636. }
  637. func (ka *knownAddress) markAttempt() {
  638. now := time.Now()
  639. ka.LastAttempt = now
  640. ka.Attempts += 1
  641. }
  642. func (ka *knownAddress) markGood() {
  643. now := time.Now()
  644. ka.LastAttempt = now
  645. ka.Attempts = 0
  646. ka.LastSuccess = now
  647. }
  648. func (ka *knownAddress) addBucketRef(bucketIdx int) int {
  649. for _, bucket := range ka.Buckets {
  650. if bucket == bucketIdx {
  651. log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
  652. return -1
  653. }
  654. }
  655. ka.Buckets = append(ka.Buckets, bucketIdx)
  656. return len(ka.Buckets)
  657. }
  658. func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
  659. buckets := []int{}
  660. for _, bucket := range ka.Buckets {
  661. if bucket != bucketIdx {
  662. buckets = append(buckets, bucket)
  663. }
  664. }
  665. if len(buckets) != len(ka.Buckets)-1 {
  666. log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
  667. return -1
  668. }
  669. ka.Buckets = buckets
  670. return len(ka.Buckets)
  671. }
  672. /*
  673. An address is bad if the address in question has not been tried in the last
  674. minute and meets one of the following criteria:
  675. 1) It claims to be from the future
  676. 2) It hasn't been seen in over a month
  677. 3) It has failed at least three times and never succeeded
  678. 4) It has failed ten times in the last week
  679. All addresses that meet these criteria are assumed to be worthless and not
  680. worth keeping hold of.
  681. */
  682. func (ka *knownAddress) isBad() bool {
  683. // Has been attempted in the last minute --> good
  684. if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
  685. return false
  686. }
  687. // Over a month old?
  688. if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
  689. return true
  690. }
  691. // Never succeeded?
  692. if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
  693. return true
  694. }
  695. // Hasn't succeeded in too long?
  696. if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
  697. ka.Attempts >= maxFailures {
  698. return true
  699. }
  700. return false
  701. }