You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

784 lines
19 KiB

11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
11 years ago
  1. // Modified for Tendermint
  2. // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
  3. // https://github.com/conformal/btcd/blob/master/LICENSE
  4. package p2p
  5. import (
  6. crand "crypto/rand" // for seeding
  7. "encoding/binary"
  8. "encoding/json"
  9. "fmt"
  10. "io"
  11. "math"
  12. "math/rand"
  13. "net"
  14. "os"
  15. "sync"
  16. "sync/atomic"
  17. "time"
  18. )
/* AddrBook - concurrency safe peer address manager */
type AddrBook struct {
	filePath string     // JSON file the book is persisted to (see saveToFile/loadFromFile)
	mtx      sync.Mutex // guards all mutable state below
	rand     *rand.Rand // PRNG for bucket/address selection; used under mtx
	key      [32]byte   // secret key mixed into calcNewBucket/calcOldBucket hashing
	addrLookup map[string]*knownAddress // new & old
	addrNew  []map[string]*knownAddress // "new" buckets: addresses not yet marked good
	addrOld  []map[string]*knownAddress // "old" buckets: addresses promoted via MarkGood
	started  uint32 // CAS flag so Start only runs once
	stopped  uint32 // CAS flag so Stop only runs once
	wg       sync.WaitGroup // waits for saveHandler to finish on Stop
	quit     chan struct{}  // closed by Stop to end saveHandler
	nOld     int // count of addresses held in old buckets (each counted once)
	nNew     int // count of addresses held in new buckets (each counted once)
}
// Bucket type tags stored per address in knownAddress.BucketType.
const (
	bucketTypeNew = 0x01
	bucketTypeOld = 0x02
)
// Address-manager tuning parameters.
const (
	// addresses under which the address manager will claim to need more addresses.
	needAddressThreshold = 1000
	// interval used to dump the address cache to disk for future use.
	dumpAddressInterval = time.Minute * 2
	// max addresses in each old address bucket.
	oldBucketSize = 64
	// buckets we split old addresses over.
	oldBucketCount = 64
	// max addresses in each new address bucket.
	newBucketSize = 64
	// buckets that we spread new addresses over.
	newBucketCount = 256
	// old buckets over which an address group will be spread.
	oldBucketsPerGroup = 4
	// new buckets over which a source address group will be spread.
	newBucketsPerGroup = 32
	// buckets a frequently seen new address may end up in.
	maxNewBucketsPerAddress = 4
	// days before which we assume an address has vanished
	// if we have not seen it announced in that long.
	numMissingDays = 30
	// tries without a single success before we assume an address is bad.
	numRetries = 3
	// max failures we will accept without a success before considering an address bad.
	maxFailures = 10
	// days since the last success before we will consider evicting an address.
	minBadDays = 7
	// max addresses that we will send in response to a GetSelection
	getSelectionMax = 2500
	// % of total addresses known that we will share with a call to GetSelection
	getSelectionPercent = 23
	// current version of the on-disk format.
	serializationVersion = 1
)
  74. // Use Start to begin processing asynchronous address updates.
  75. func NewAddrBook(filePath string) *AddrBook {
  76. am := AddrBook{
  77. rand: rand.New(rand.NewSource(time.Now().UnixNano())),
  78. quit: make(chan struct{}),
  79. filePath: filePath,
  80. }
  81. am.init()
  82. return &am
  83. }
  84. // When modifying this, don't forget to update loadFromFile()
  85. func (a *AddrBook) init() {
  86. io.ReadFull(crand.Reader, a.key[:])
  87. // addr -> ka index
  88. a.addrLookup = make(map[string]*knownAddress)
  89. // New addr buckets
  90. a.addrNew = make([]map[string]*knownAddress, newBucketCount)
  91. for i := range a.addrNew {
  92. a.addrNew[i] = make(map[string]*knownAddress)
  93. }
  94. // Old addr buckets
  95. a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
  96. for i := range a.addrOld {
  97. a.addrOld[i] = make(map[string]*knownAddress)
  98. }
  99. }
// Start loads the persisted book from disk and launches the background
// save goroutine. Only the first call has any effect (CAS-guarded).
func (a *AddrBook) Start() {
	if atomic.CompareAndSwapUint32(&a.started, 0, 1) {
		log.Info("Starting address manager")
		// Load before the save goroutine exists, so no locking is needed here.
		a.loadFromFile(a.filePath)
		a.wg.Add(1)
		go a.saveHandler()
	}
}
// Stop signals the save goroutine to exit (via closing quit) and blocks
// until it has performed its final save. Only the first call has effect.
func (a *AddrBook) Stop() {
	if atomic.CompareAndSwapUint32(&a.stopped, 0, 1) {
		log.Info("Stopping address manager")
		close(a.quit)
		a.wg.Wait()
	}
}
// AddAddress records addr in the book, crediting src as the peer that told
// us about it. Panics on non-routable addresses (see addAddress).
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	a.addAddress(addr, src)
}
// NeedMoreAddresses reports whether the book holds fewer than
// needAddressThreshold addresses.
func (a *AddrBook) NeedMoreAddresses() bool {
	return a.Size() < needAddressThreshold
}
// Size returns the total number of tracked addresses (new + old).
func (a *AddrBook) Size() int {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	return a.size()
}
// size returns nNew + nOld. Caller must hold a.mtx.
func (a *AddrBook) size() int {
	return a.nNew + a.nOld
}
  131. // Pick an address to connect to with new/old bias.
  132. func (a *AddrBook) PickAddress(newBias int) *NetAddress {
  133. a.mtx.Lock()
  134. defer a.mtx.Unlock()
  135. if a.size() == 0 {
  136. return nil
  137. }
  138. if newBias > 100 {
  139. newBias = 100
  140. }
  141. if newBias < 0 {
  142. newBias = 0
  143. }
  144. // Bias between new and old addresses.
  145. oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
  146. newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
  147. if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
  148. // pick random Old bucket.
  149. var bucket map[string]*knownAddress = nil
  150. for len(bucket) == 0 {
  151. bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
  152. }
  153. // pick a random ka from bucket.
  154. randIndex := a.rand.Intn(len(bucket))
  155. for _, ka := range bucket {
  156. if randIndex == 0 {
  157. return ka.Addr
  158. }
  159. randIndex--
  160. }
  161. panic("Should not happen")
  162. } else {
  163. // pick random New bucket.
  164. var bucket map[string]*knownAddress = nil
  165. for len(bucket) == 0 {
  166. bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
  167. }
  168. // pick a random ka from bucket.
  169. randIndex := a.rand.Intn(len(bucket))
  170. for _, ka := range bucket {
  171. if randIndex == 0 {
  172. return ka.Addr
  173. }
  174. randIndex--
  175. }
  176. panic("Should not happen")
  177. }
  178. return nil
  179. }
// MarkGood records a successful connection to addr (resetting its failure
// count) and promotes it from the new buckets to an old bucket.
// Unknown addresses are ignored.
func (a *AddrBook) MarkGood(addr *NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	ka := a.addrLookup[addr.String()]
	if ka == nil {
		return
	}
	ka.markGood()
	if ka.isNew() {
		a.moveToOld(ka)
	}
}
// MarkAttempt records a connection attempt to addr (bumping its attempt
// counter and timestamp). Unknown addresses are ignored.
func (a *AddrBook) MarkAttempt(addr *NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	ka := a.addrLookup[addr.String()]
	if ka == nil {
		return
	}
	ka.markAttempt()
}
// MarkBad removes addr from the book entirely. Unknown addresses are ignored.
func (a *AddrBook) MarkBad(addr *NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	ka := a.addrLookup[addr.String()]
	if ka == nil {
		return
	}
	// We currently just eject the address.
	// In the future, consider blacklisting.
	a.removeFromAllBuckets(ka)
}
  212. /* Peer exchange */
  213. // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
  214. func (a *AddrBook) GetSelection() []*NetAddress {
  215. a.mtx.Lock()
  216. defer a.mtx.Unlock()
  217. if a.size() == 0 {
  218. return nil
  219. }
  220. allAddr := make([]*NetAddress, a.size())
  221. i := 0
  222. for _, v := range a.addrLookup {
  223. allAddr[i] = v.Addr
  224. i++
  225. }
  226. numAddresses := len(allAddr) * getSelectionPercent / 100
  227. if numAddresses > getSelectionMax {
  228. numAddresses = getSelectionMax
  229. }
  230. // Fisher-Yates shuffle the array. We only need to do the first
  231. // `numAddresses' since we are throwing the rest.
  232. for i := 0; i < numAddresses; i++ {
  233. // pick a number between current index and the end
  234. j := rand.Intn(len(allAddr)-i) + i
  235. allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
  236. }
  237. // slice off the limit we are willing to share.
  238. return allAddr[:numAddresses]
  239. }
  240. /* Loading & Saving */
// addrBookJSON is the on-disk representation of the address book:
// the bucket-hashing key plus every known address (with its bucket refs).
type addrBookJSON struct {
	Key [32]byte
	Addrs []*knownAddress
}
  245. func (a *AddrBook) saveToFile(filePath string) {
  246. // Compile Addrs
  247. addrs := []*knownAddress{}
  248. for _, ka := range a.addrLookup {
  249. addrs = append(addrs, ka)
  250. }
  251. aJSON := &addrBookJSON{
  252. Key: a.key,
  253. Addrs: addrs,
  254. }
  255. w, err := os.Create(filePath)
  256. if err != nil {
  257. log.Error("Error opening file: ", filePath, err)
  258. return
  259. }
  260. enc := json.NewEncoder(w)
  261. defer w.Close()
  262. err = enc.Encode(&aJSON)
  263. if err != nil {
  264. panic(err)
  265. }
  266. }
// loadFromFile restores the book from the JSON file written by saveToFile.
// A missing file is not an error (fresh book); any other I/O or decode
// failure panics, since a corrupt address book cannot be recovered here.
// NOTE(review): no mutex is taken; Start() calls this before the save
// goroutine is launched, so nothing runs concurrently at that point.
func (a *AddrBook) loadFromFile(filePath string) {
	// If doesn't exist, do nothing.
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		return
	}
	// Load addrBookJSON{}
	r, err := os.Open(filePath)
	if err != nil {
		panic(fmt.Errorf("%s error opening file: %v", filePath, err))
	}
	defer r.Close()
	aJSON := &addrBookJSON{}
	dec := json.NewDecoder(r)
	err = dec.Decode(aJSON)
	if err != nil {
		panic(fmt.Errorf("error reading %s: %v", filePath, err))
	}
	// Restore all the fields...
	// Restore the key
	copy(a.key[:], aJSON.Key[:])
	// Restore .addrNew & .addrOld: re-insert each address into every bucket
	// it was serialized as belonging to, and rebuild the lookup index.
	for _, ka := range aJSON.Addrs {
		for _, bucketIndex := range ka.Buckets {
			bucket := a.getBucket(ka.BucketType, bucketIndex)
			bucket[ka.Addr.String()] = ka
		}
		a.addrLookup[ka.Addr.String()] = ka
		// Each address counts once toward nNew/nOld regardless of how many
		// buckets it appears in.
		if ka.BucketType == bucketTypeNew {
			a.nNew++
		} else {
			a.nOld++
		}
	}
}
  302. /* Private methods */
  303. func (a *AddrBook) saveHandler() {
  304. dumpAddressTicker := time.NewTicker(dumpAddressInterval)
  305. out:
  306. for {
  307. select {
  308. case <-dumpAddressTicker.C:
  309. a.saveToFile(a.filePath)
  310. case <-a.quit:
  311. break out
  312. }
  313. }
  314. dumpAddressTicker.Stop()
  315. a.saveToFile(a.filePath)
  316. a.wg.Done()
  317. log.Info("Address handler done")
  318. }
  319. func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
  320. switch bucketType {
  321. case bucketTypeNew:
  322. return a.addrNew[bucketIdx]
  323. case bucketTypeOld:
  324. return a.addrOld[bucketIdx]
  325. default:
  326. panic("Should not happen")
  327. }
  328. }
  329. // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
  330. // NOTE: currently it always returns true.
  331. func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
  332. // Sanity check
  333. if ka.isOld() {
  334. panic("Cannot add address already in old bucket to a new bucket")
  335. }
  336. key := ka.Addr.String()
  337. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  338. // Already exists?
  339. if _, ok := bucket[key]; ok {
  340. return true
  341. }
  342. // Enforce max addresses.
  343. if len(bucket) > newBucketSize {
  344. log.Info("new bucket is full, expiring old ")
  345. a.expireNew(bucketIdx)
  346. }
  347. // Add to bucket.
  348. bucket[key] = ka
  349. if ka.addBucketRef(bucketIdx) == 1 {
  350. a.nNew++
  351. }
  352. // Ensure in addrLookup
  353. a.addrLookup[key] = ka
  354. return true
  355. }
  356. // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
  357. func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
  358. // Sanity check
  359. if ka.isNew() {
  360. panic("Cannot add new address to old bucket")
  361. }
  362. if len(ka.Buckets) != 0 {
  363. panic("Cannot add already old address to another old bucket")
  364. }
  365. key := ka.Addr.String()
  366. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  367. // Already exists?
  368. if _, ok := bucket[key]; ok {
  369. return true
  370. }
  371. // Enforce max addresses.
  372. if len(bucket) > oldBucketSize {
  373. return false
  374. }
  375. // Add to bucket.
  376. bucket[key] = ka
  377. if ka.addBucketRef(bucketIdx) == 1 {
  378. a.nOld++
  379. }
  380. // Ensure in addrLookup
  381. a.addrLookup[key] = ka
  382. return true
  383. }
// removeFromBucket removes ka from one specific bucket; when that was its
// last bucket reference the address is dropped from the lookup table and
// the new/old counter is decremented.
func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
	if ka.BucketType != bucketType {
		panic("Bucket type mismatch")
	}
	bucket := a.getBucket(bucketType, bucketIdx)
	delete(bucket, ka.Addr.String())
	if ka.removeBucketRef(bucketIdx) == 0 {
		// Last reference gone: the address is no longer tracked at all.
		if bucketType == bucketTypeNew {
			a.nNew--
		} else {
			a.nOld--
		}
		delete(a.addrLookup, ka.Addr.String())
	}
}
// removeFromAllBuckets removes ka from every bucket it is referenced by,
// clears its bucket refs, and forgets it in the lookup table.
func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
	for _, bucketIdx := range ka.Buckets {
		bucket := a.getBucket(ka.BucketType, bucketIdx)
		delete(bucket, ka.Addr.String())
	}
	ka.Buckets = nil
	// The address counts once toward nNew/nOld no matter how many buckets
	// it was in, so decrement exactly once.
	if ka.BucketType == bucketTypeNew {
		a.nNew--
	} else {
		a.nOld--
	}
	delete(a.addrLookup, ka.Addr.String())
}
  412. func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
  413. bucket := a.getBucket(bucketType, bucketIdx)
  414. var oldest *knownAddress
  415. for _, ka := range bucket {
  416. if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
  417. oldest = ka
  418. }
  419. }
  420. return oldest
  421. }
  422. func (a *AddrBook) addAddress(addr, src *NetAddress) {
  423. if !addr.Routable() {
  424. panic("Cannot add non-routable address")
  425. }
  426. ka := a.addrLookup[addr.String()]
  427. if ka != nil {
  428. // Already old.
  429. if ka.isOld() {
  430. return
  431. }
  432. // Already in max new buckets.
  433. if len(ka.Buckets) == maxNewBucketsPerAddress {
  434. return
  435. }
  436. // The more entries we have, the less likely we are to add more.
  437. factor := int32(2 * len(ka.Buckets))
  438. if a.rand.Int31n(factor) != 0 {
  439. return
  440. }
  441. } else {
  442. ka = newKnownAddress(addr, src)
  443. }
  444. bucket := a.calcNewBucket(addr, src)
  445. a.addToNewBucket(ka, bucket)
  446. log.Info("Added new address %s for a total of %d addresses", addr, a.size())
  447. }
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
func (a *AddrBook) expireNew(bucketIdx int) {
	for key, ka := range a.addrNew[bucketIdx] {
		// If an entry is bad, throw it away
		if ka.isBad() {
			log.Info("expiring bad address %v", key)
			a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
			// Removing a single entry is enough to make room.
			return
		}
	}
	// If we haven't thrown out a bad entry, throw out the oldest entry
	oldest := a.pickOldest(bucketTypeNew, bucketIdx)
	a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
}
  463. // Promotes an address from new to old.
  464. // TODO: Move to old probabilistically.
  465. // The better a node is, the less likely it should be evicted from an old bucket.
  466. func (a *AddrBook) moveToOld(ka *knownAddress) {
  467. // Sanity check
  468. if ka.isOld() {
  469. panic("Cannot promote address that is already old")
  470. }
  471. if len(ka.Buckets) == 0 {
  472. panic("Cannot promote address that isn't in any new buckets")
  473. }
  474. // Remember one of the buckets in which ka is in.
  475. freedBucket := ka.Buckets[0]
  476. // Remove from all (new) buckets.
  477. a.removeFromAllBuckets(ka)
  478. // It's officially old now.
  479. ka.BucketType = bucketTypeOld
  480. // Try to add it to its oldBucket destination.
  481. oldBucketIdx := a.calcOldBucket(ka.Addr)
  482. added := a.addToOldBucket(ka, oldBucketIdx)
  483. if !added {
  484. // No room, must evict something
  485. oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
  486. a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
  487. // Find new bucket to put oldest in
  488. newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
  489. added := a.addToNewBucket(oldest, newBucketIdx)
  490. // No space in newBucket either, just put it in freedBucket from above.
  491. if !added {
  492. added := a.addToNewBucket(oldest, freedBucket)
  493. if !added {
  494. log.Warning("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket)
  495. }
  496. }
  497. // Finally, add to bucket again.
  498. added = a.addToOldBucket(ka, oldBucketIdx)
  499. if !added {
  500. log.Warning("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)
  501. }
  502. }
  503. }
  504. // doublesha256(key + sourcegroup +
  505. // int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckes
  506. func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
  507. data1 := []byte{}
  508. data1 = append(data1, a.key[:]...)
  509. data1 = append(data1, []byte(groupKey(addr))...)
  510. data1 = append(data1, []byte(groupKey(src))...)
  511. hash1 := doubleSha256(data1)
  512. hash64 := binary.LittleEndian.Uint64(hash1)
  513. hash64 %= newBucketsPerGroup
  514. var hashbuf [8]byte
  515. binary.LittleEndian.PutUint64(hashbuf[:], hash64)
  516. data2 := []byte{}
  517. data2 = append(data2, a.key[:]...)
  518. data2 = append(data2, groupKey(src)...)
  519. data2 = append(data2, hashbuf[:]...)
  520. hash2 := doubleSha256(data2)
  521. return int(binary.LittleEndian.Uint64(hash2) % newBucketCount)
  522. }
  523. // doublesha256(key + group + truncate_to_64bits(doublesha256(key + addr))%buckets_per_group) % num_buckets
  524. func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
  525. data1 := []byte{}
  526. data1 = append(data1, a.key[:]...)
  527. data1 = append(data1, []byte(addr.String())...)
  528. hash1 := doubleSha256(data1)
  529. hash64 := binary.LittleEndian.Uint64(hash1)
  530. hash64 %= oldBucketsPerGroup
  531. var hashbuf [8]byte
  532. binary.LittleEndian.PutUint64(hashbuf[:], hash64)
  533. data2 := []byte{}
  534. data2 = append(data2, a.key[:]...)
  535. data2 = append(data2, groupKey(addr)...)
  536. data2 = append(data2, hashbuf[:]...)
  537. hash2 := doubleSha256(data2)
  538. return int(binary.LittleEndian.Uint64(hash2) % oldBucketCount)
  539. }
  540. // Return a string representing the network group of this address.
  541. // This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string
  542. // "local" for a local address and the string "unroutable for an unroutable
  543. // address.
  544. func groupKey(na *NetAddress) string {
  545. if na.Local() {
  546. return "local"
  547. }
  548. if !na.Routable() {
  549. return "unroutable"
  550. }
  551. if ipv4 := na.IP.To4(); ipv4 != nil {
  552. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
  553. }
  554. if na.RFC6145() || na.RFC6052() {
  555. // last four bytes are the ip address
  556. ip := net.IP(na.IP[12:16])
  557. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  558. }
  559. if na.RFC3964() {
  560. ip := net.IP(na.IP[2:7])
  561. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  562. }
  563. if na.RFC4380() {
  564. // teredo tunnels have the last 4 bytes as the v4 address XOR
  565. // 0xff.
  566. ip := net.IP(make([]byte, 4))
  567. for i, byte := range na.IP[12:16] {
  568. ip[i] = byte ^ 0xff
  569. }
  570. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  571. }
  572. // OK, so now we know ourselves to be a IPv6 address.
  573. // bitcoind uses /32 for everything, except for Hurricane Electric's
  574. // (he.net) IP range, which it uses /36 for.
  575. bits := 32
  576. heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
  577. Mask: net.CIDRMask(32, 128)}
  578. if heNet.Contains(na.IP) {
  579. bits = 36
  580. }
  581. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
  582. }
  583. //-----------------------------------------------------------------------------
/*
knownAddress

tracks information about a known network address that is used
to determine how viable an address is.
*/
type knownAddress struct {
	Addr        *NetAddress // the address itself
	Src         *NetAddress // the peer that told us about Addr
	Attempts    uint32      // connection attempts since the last success (reset by markGood)
	LastAttempt time.Time   // time of the most recent connection attempt
	LastSuccess time.Time   // time of the most recent success (zero if never)
	BucketType  byte        // bucketTypeNew or bucketTypeOld
	Buckets     []int       // indexes of the buckets (of BucketType) referencing this address
}
  598. func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
  599. return &knownAddress{
  600. Addr: addr,
  601. Src: src,
  602. Attempts: 0,
  603. LastAttempt: time.Now(),
  604. BucketType: bucketTypeNew,
  605. Buckets: nil,
  606. }
  607. }
// isOld reports whether ka currently belongs to the old bucket set.
func (ka *knownAddress) isOld() bool {
	return ka.BucketType == bucketTypeOld
}
// isNew reports whether ka currently belongs to the new bucket set.
func (ka *knownAddress) isNew() bool {
	return ka.BucketType == bucketTypeNew
}
  614. func (ka *knownAddress) markAttempt() {
  615. now := time.Now()
  616. ka.LastAttempt = now
  617. ka.Attempts += 1
  618. }
  619. func (ka *knownAddress) markGood() {
  620. now := time.Now()
  621. ka.LastAttempt = now
  622. ka.Attempts = 0
  623. ka.LastSuccess = now
  624. }
  625. func (ka *knownAddress) addBucketRef(bucketIdx int) int {
  626. for _, bucket := range ka.Buckets {
  627. if bucket == bucketIdx {
  628. panic("Bucket already exists in ka.Buckets")
  629. }
  630. }
  631. ka.Buckets = append(ka.Buckets, bucketIdx)
  632. return len(ka.Buckets)
  633. }
  634. func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
  635. buckets := []int{}
  636. for _, bucket := range ka.Buckets {
  637. if bucket != bucketIdx {
  638. buckets = append(buckets, bucket)
  639. }
  640. }
  641. if len(buckets) != len(ka.Buckets)-1 {
  642. panic("bucketIdx not found in ka.Buckets")
  643. }
  644. ka.Buckets = buckets
  645. return len(ka.Buckets)
  646. }
  647. /*
  648. An address is bad if the address in question has not been tried in the last
  649. minute and meets one of the following criteria:
  650. 1) It claims to be from the future
  651. 2) It hasn't been seen in over a month
  652. 3) It has failed at least three times and never succeeded
  653. 4) It has failed ten times in the last week
  654. All addresses that meet these criteria are assumed to be worthless and not
  655. worth keeping hold of.
  656. */
  657. func (ka *knownAddress) isBad() bool {
  658. // Has been attempted in the last minute --> good
  659. if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
  660. return false
  661. }
  662. // Over a month old?
  663. if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
  664. return true
  665. }
  666. // Never succeeded?
  667. if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
  668. return true
  669. }
  670. // Hasn't succeeded in too long?
  671. if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
  672. ka.Attempts >= maxFailures {
  673. return true
  674. }
  675. return false
  676. }