You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

520 lines
14 KiB

10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
  1. // Modified for Tendermint
  2. // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
  3. // https://github.com/conformal/btcd/blob/master/LICENSE
  4. package peer
  5. import (
  6. . "github.com/tendermint/tendermint/binary"
  7. crand "crypto/rand" // for seeding
  8. "encoding/binary"
  9. "encoding/json"
  10. "io"
  11. "math"
  12. "math/rand"
  13. "net"
  14. "sync"
  15. "sync/atomic"
  16. "time"
  17. "os"
  18. "fmt"
  19. )
/* AddrBook - concurrency safe peer address manager */
type AddrBook struct {
	filePath string     // file the book is saved to / loaded from
	mtx      sync.Mutex // guards the maps, buckets and counters below
	rand     *rand.Rand // seeded from the wall clock; drives bucket/address selection
	key      [32]byte   // random key that salts the bucket hash functions
	addrIndex map[string]*KnownAddress // addr.String() -> KnownAddress
	addrNew [newBucketCount]map[string]*KnownAddress // "new" (untried) addresses, bucketed
	addrOld [oldBucketCount][]*KnownAddress          // "old" (tried) addresses, bucketed
	started  int32 // set atomically; only the first Start proceeds
	shutdown int32 // set atomically; only the first Stop proceeds
	wg       sync.WaitGroup // waits for addressHandler to exit
	quit     chan struct{}  // closed by Stop to signal addressHandler
	nOld int // number of addresses stored in old buckets
	nNew int // number of distinct addresses stored in new buckets
}
const (
	// number of addresses under which the address manager will claim to need more addresses.
	needAddressThreshold = 1000
	// interval used to dump the address cache to disk for future use.
	dumpAddressInterval = time.Minute * 2
	// max addresses in each old address bucket.
	oldBucketSize = 64
	// number of buckets we split old addresses over.
	oldBucketCount = 64
	// max addresses in each new address bucket.
	newBucketSize = 64
	// number of buckets that we spread new addresses over.
	newBucketCount = 256
	// old buckets over which an address group will be spread.
	oldBucketsPerGroup = 4
	// new buckets over which a source address group will be spread.
	newBucketsPerGroup = 32
	// max number of buckets a frequently seen new address may end up in.
	newBucketsPerAddress = 4
	// days before which we assume an address has vanished
	// if we have not seen it announced in that long.
	numMissingDays = 30
	// tries without a single success before we assume an address is bad.
	numRetries = 3
	// max failures we will accept without a success before considering an address bad.
	maxFailures = 10
	// days since the last success before we will consider evicting an address.
	minBadDays = 7
	// max addresses that we will send in response to a getAddr
	// (in practise the most addresses we will return from a call to AddressCache()).
	getAddrMax = 2500
	// % of total addresses known that we will share with a call to AddressCache.
	getAddrPercent = 23
	// current version of the on-disk format.
	serialisationVersion = 1
)
  72. // Use Start to begin processing asynchronous address updates.
  73. func NewAddrBook(filePath string) *AddrBook {
  74. am := AddrBook{
  75. rand: rand.New(rand.NewSource(time.Now().UnixNano())),
  76. quit: make(chan struct{}),
  77. filePath: filePath,
  78. }
  79. am.init()
  80. return &am
  81. }
  82. // When modifying this, don't forget to update loadFromFile()
  83. func (a *AddrBook) init() {
  84. a.addrIndex = make(map[string]*KnownAddress)
  85. io.ReadFull(crand.Reader, a.key[:])
  86. for i := range a.addrNew {
  87. a.addrNew[i] = make(map[string]*KnownAddress)
  88. }
  89. for i := range a.addrOld {
  90. a.addrOld[i] = make([]*KnownAddress, 0, oldBucketSize)
  91. }
  92. }
// Start loads the book from disk (if present) and launches the background
// goroutine that periodically saves it. Idempotent: only the first call
// has any effect.
func (a *AddrBook) Start() {
	if atomic.AddInt32(&a.started, 1) != 1 { return }
	log.Trace("Starting address manager")
	a.loadFromFile(a.filePath)
	a.wg.Add(1)
	go a.addressHandler()
}
// Stop signals the background goroutine to perform a final save and exit,
// then blocks until it has done so. Idempotent: only the first call has
// any effect.
func (a *AddrBook) Stop() {
	if atomic.AddInt32(&a.shutdown, 1) != 1 { return }
	log.Infof("Address manager shutting down")
	close(a.quit)
	a.wg.Wait()
}
  106. func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
  107. a.mtx.Lock(); defer a.mtx.Unlock()
  108. a.addAddress(addr, src)
  109. }
// NeedMoreAddresses reports whether the book holds fewer than
// needAddressThreshold addresses.
func (a *AddrBook) NeedMoreAddresses() bool {
	return a.NumAddresses() < needAddressThreshold
}
  113. func (a *AddrBook) NumAddresses() int {
  114. a.mtx.Lock(); defer a.mtx.Unlock()
  115. return a.nOld + a.nNew
  116. }
  117. // Pick a new address to connect to.
  118. func (a *AddrBook) PickAddress(class string, newBias int) *KnownAddress {
  119. a.mtx.Lock(); defer a.mtx.Unlock()
  120. if a.nOld == 0 && a.nNew == 0 { return nil }
  121. if newBias > 100 { newBias = 100 }
  122. if newBias < 0 { newBias = 0 }
  123. // Bias between new and old addresses.
  124. oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
  125. newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
  126. if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
  127. // pick random Old bucket.
  128. var bucket []*KnownAddress = nil
  129. for len(bucket) == 0 {
  130. bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
  131. }
  132. // pick a random ka from bucket.
  133. return bucket[a.rand.Intn(len(bucket))]
  134. } else {
  135. // pick random New bucket.
  136. var bucket map[string]*KnownAddress = nil
  137. for len(bucket) == 0 {
  138. bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
  139. }
  140. // pick a random ka from bucket.
  141. randIndex := a.rand.Intn(len(bucket))
  142. for _, ka := range bucket {
  143. randIndex--
  144. if randIndex == 0 {
  145. return ka
  146. }
  147. }
  148. panic("Should not happen")
  149. }
  150. return nil
  151. }
  152. func (a *AddrBook) MarkGood(addr *NetAddress) {
  153. a.mtx.Lock(); defer a.mtx.Unlock()
  154. ka := a.addrIndex[addr.String()]
  155. if ka == nil { return }
  156. ka.MarkAttempt(true)
  157. if ka.OldBucket == -1 {
  158. a.moveToOld(ka)
  159. }
  160. }
  161. func (a *AddrBook) MarkAttempt(addr *NetAddress) {
  162. a.mtx.Lock(); defer a.mtx.Unlock()
  163. ka := a.addrIndex[addr.String()]
  164. if ka == nil { return }
  165. ka.MarkAttempt(false)
  166. }
/* Loading & Saving */

// addrBookJSON is the on-disk JSON representation of an AddrBook.
// Keep it in sync with the persisted fields of AddrBook.
type addrBookJSON struct {
	Key     [32]byte
	AddrNew [newBucketCount]map[string]*KnownAddress
	AddrOld [oldBucketCount][]*KnownAddress
	NOld    int
	NNew    int
}
  175. func (a *AddrBook) saveToFile(filePath string) {
  176. aJSON := &addrBookJSON{
  177. Key: a.key,
  178. AddrNew: a.addrNew,
  179. AddrOld: a.addrOld,
  180. NOld: a.nOld,
  181. NNew: a.nNew,
  182. }
  183. w, err := os.Create(filePath)
  184. if err != nil {
  185. log.Error("Error opening file: ", filePath, err)
  186. return
  187. }
  188. enc := json.NewEncoder(w)
  189. defer w.Close()
  190. err = enc.Encode(&aJSON)
  191. if err != nil { panic(err) }
  192. }
  193. func (a *AddrBook) loadFromFile(filePath string) {
  194. // If doesn't exist, do nothing.
  195. _, err := os.Stat(filePath)
  196. if os.IsNotExist(err) { return }
  197. // Load addrBookJSON{}
  198. r, err := os.Open(filePath)
  199. if err != nil {
  200. panic(fmt.Errorf("%s error opening file: %v", filePath, err))
  201. }
  202. defer r.Close()
  203. aJSON := &addrBookJSON{}
  204. dec := json.NewDecoder(r)
  205. err = dec.Decode(aJSON)
  206. if err != nil {
  207. panic(fmt.Errorf("error reading %s: %v", filePath, err))
  208. }
  209. // Now we need to initialize self.
  210. copy(a.key[:], aJSON.Key[:])
  211. a.addrNew = aJSON.AddrNew
  212. for i, oldBucket := range aJSON.AddrOld {
  213. copy(a.addrOld[i], oldBucket)
  214. }
  215. a.nNew = aJSON.NNew
  216. a.nOld = aJSON.NOld
  217. a.addrIndex = make(map[string]*KnownAddress)
  218. for _, newBucket := range a.addrNew {
  219. for key, ka := range newBucket {
  220. a.addrIndex[key] = ka
  221. }
  222. }
  223. }
  224. /* Private methods */
  225. func (a *AddrBook) addressHandler() {
  226. dumpAddressTicker := time.NewTicker(dumpAddressInterval)
  227. out:
  228. for {
  229. select {
  230. case <-dumpAddressTicker.C:
  231. a.saveToFile(a.filePath)
  232. case <-a.quit:
  233. break out
  234. }
  235. }
  236. dumpAddressTicker.Stop()
  237. a.saveToFile(a.filePath)
  238. a.wg.Done()
  239. log.Trace("Address handler done")
  240. }
// addAddress places addr (reported by src) into a new bucket, creating the
// KnownAddress entry if needed. Unroutable addresses are ignored.
// Caller must hold a.mtx.
func (a *AddrBook) addAddress(addr, src *NetAddress) {
	if !addr.Routable() { return }
	key := addr.String()
	ka := a.addrIndex[key]
	if ka != nil {
		// Already added
		if ka.OldBucket != -1 { return } // already promoted to an old bucket
		if ka.NewRefs == newBucketsPerAddress { return } // already in the max number of new buckets
		// The more entries we have, the less likely we are to add more.
		// NOTE(review): assumes NewRefs >= 1 for indexed new addresses;
		// Int31n panics if factor is 0 — confirm the invariant holds.
		factor := int32(2 * ka.NewRefs)
		if a.rand.Int31n(factor) != 0 {
			return
		}
	} else {
		ka = NewKnownAddress(addr, src)
		a.addrIndex[key] = ka
		a.nNew++ // nNew counts distinct addresses, not bucket references
	}
	// Bucket is a keyed hash of the addr and src network groups.
	bucket := a.getNewBucket(addr, src)
	// Already exists?
	if _, ok := a.addrNew[bucket][key]; ok {
		return
	}
	// Enforce max addresses.
	if len(a.addrNew[bucket]) > newBucketSize {
		log.Tracef("new bucket is full, expiring old ")
		a.expireNew(bucket)
	}
	// Add to new bucket.
	ka.NewRefs++
	a.addrNew[bucket][key] = ka
	log.Tracef("Added new address %s for a total of %d addresses", addr, a.nOld+a.nNew)
}
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we look at a few and remove the oldest.
// Caller must hold a.mtx.
func (a *AddrBook) expireNew(bucket int) {
	var oldest *KnownAddress
	for k, v := range a.addrNew[bucket] {
		// If an entry is bad, throw it away
		if v.Bad() {
			log.Tracef("expiring bad address %v", k)
			delete(a.addrNew[bucket], k)
			v.NewRefs--
			// Drop from the index once no new bucket references it.
			if v.NewRefs == 0 {
				a.nNew--
				delete(a.addrIndex, k)
			}
			return // one eviction is enough to make room
		}
		// or, keep track of the oldest entry (by last attempt time)
		if oldest == nil {
			oldest = v
		} else if v.LastAttempt.Before(oldest.LastAttempt.Time) {
			oldest = v
		}
	}
	// If we haven't thrown out a bad entry, throw out the oldest entry
	if oldest != nil {
		key := oldest.Addr.String()
		log.Tracef("expiring oldest address %v", key)
		delete(a.addrNew[bucket], key)
		oldest.NewRefs--
		if oldest.NewRefs == 0 {
			a.nNew--
			delete(a.addrIndex, key)
		}
	}
}
// moveToOld promotes ka from the new buckets to an old bucket, evicting an
// existing old entry back into a new bucket when the target is full.
// Panics if ka is not present in any new bucket. Caller must hold a.mtx.
func (a *AddrBook) moveToOld(ka *KnownAddress) {
	// Remove from all new buckets.
	// Remember one of those new buckets.
	addrKey := ka.Addr.String()
	freedBucket := -1
	for i := range a.addrNew {
		// we check for existance so we can record the first one
		if _, ok := a.addrNew[i][addrKey]; ok {
			delete(a.addrNew[i], addrKey)
			ka.NewRefs--
			if freedBucket == -1 {
				freedBucket = i
			}
		}
	}
	a.nNew-- // ka no longer counts as a new address
	if freedBucket == -1 { panic("Expected to find addr in at least one new bucket") }
	oldBucket := a.getOldBucket(ka.Addr)
	// If room in oldBucket, put it in.
	if len(a.addrOld[oldBucket]) < oldBucketSize {
		ka.OldBucket = Int16(oldBucket)
		a.addrOld[oldBucket] = append(a.addrOld[oldBucket], ka)
		a.nOld++
		return
	}
	// No room, we have to evict something else.
	rmkaIndex := a.pickOld(oldBucket) // index of the oldest entry in the bucket
	rmka := a.addrOld[oldBucket][rmkaIndex]
	// Find a new bucket to put rmka in.
	newBucket := a.getNewBucket(rmka.Addr, rmka.Src)
	if len(a.addrNew[newBucket]) >= newBucketSize {
		// Preferred bucket is full; fall back to one ka just vacated.
		// NOTE(review): freedBucket may itself be full — confirm this
		// cannot push a new bucket past newBucketSize.
		newBucket = freedBucket
	}
	// replace with ka in list.
	ka.OldBucket = Int16(oldBucket)
	a.addrOld[oldBucket][rmkaIndex] = ka
	rmka.OldBucket = -1 // -1 marks "not in any old bucket"
	// put rmka into new bucket
	rmkey := rmka.Addr.String()
	log.Tracef("Replacing %s with %s in old", rmkey, addrKey)
	a.addrNew[newBucket][rmkey] = rmka
	rmka.NewRefs++
	a.nNew++ // rmka is a new address again; nOld is unchanged (one in, one out)
}
  353. // Returns the index in old bucket of oldest entry.
  354. func (a *AddrBook) pickOld(bucket int) int {
  355. var oldest *KnownAddress
  356. var oldestIndex int
  357. for i, ka := range a.addrOld[bucket] {
  358. if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt.Time) {
  359. oldest = ka
  360. oldestIndex = i
  361. }
  362. }
  363. return oldestIndex
  364. }
  365. // doublesha256(key + sourcegroup +
  366. // int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckes
  367. func (a *AddrBook) getNewBucket(addr, src *NetAddress) int {
  368. data1 := []byte{}
  369. data1 = append(data1, a.key[:]...)
  370. data1 = append(data1, []byte(GroupKey(addr))...)
  371. data1 = append(data1, []byte(GroupKey(src))...)
  372. hash1 := DoubleSha256(data1)
  373. hash64 := binary.LittleEndian.Uint64(hash1)
  374. hash64 %= newBucketsPerGroup
  375. var hashbuf [8]byte
  376. binary.LittleEndian.PutUint64(hashbuf[:], hash64)
  377. data2 := []byte{}
  378. data2 = append(data2, a.key[:]...)
  379. data2 = append(data2, GroupKey(src)...)
  380. data2 = append(data2, hashbuf[:]...)
  381. hash2 := DoubleSha256(data2)
  382. return int(binary.LittleEndian.Uint64(hash2) % newBucketCount)
  383. }
  384. // doublesha256(key + group + truncate_to_64bits(doublesha256(key + addr))%buckets_per_group) % num_buckets
  385. func (a *AddrBook) getOldBucket(addr *NetAddress) int {
  386. data1 := []byte{}
  387. data1 = append(data1, a.key[:]...)
  388. data1 = append(data1, []byte(addr.String())...)
  389. hash1 := DoubleSha256(data1)
  390. hash64 := binary.LittleEndian.Uint64(hash1)
  391. hash64 %= oldBucketsPerGroup
  392. var hashbuf [8]byte
  393. binary.LittleEndian.PutUint64(hashbuf[:], hash64)
  394. data2 := []byte{}
  395. data2 = append(data2, a.key[:]...)
  396. data2 = append(data2, GroupKey(addr)...)
  397. data2 = append(data2, hashbuf[:]...)
  398. hash2 := DoubleSha256(data2)
  399. return int(binary.LittleEndian.Uint64(hash2) % oldBucketCount)
  400. }
  401. // Return a string representing the network group of this address.
  402. // This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string
  403. // "local" for a local address and the string "unroutable for an unroutable
  404. // address.
  405. func GroupKey (na *NetAddress) string {
  406. if na.Local() {
  407. return "local"
  408. }
  409. if !na.Routable() {
  410. return "unroutable"
  411. }
  412. if ipv4 := na.IP.To4(); ipv4 != nil {
  413. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
  414. }
  415. if na.RFC6145() || na.RFC6052() {
  416. // last four bytes are the ip address
  417. ip := net.IP(na.IP[12:16])
  418. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  419. }
  420. if na.RFC3964() {
  421. ip := net.IP(na.IP[2:7])
  422. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  423. }
  424. if na.RFC4380() {
  425. // teredo tunnels have the last 4 bytes as the v4 address XOR
  426. // 0xff.
  427. ip := net.IP(make([]byte, 4))
  428. for i, byte := range na.IP[12:16] {
  429. ip[i] = byte ^ 0xff
  430. }
  431. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  432. }
  433. // OK, so now we know ourselves to be a IPv6 address.
  434. // bitcoind uses /32 for everything, except for Hurricane Electric's
  435. // (he.net) IP range, which it uses /36 for.
  436. bits := 32
  437. heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
  438. Mask: net.CIDRMask(32, 128)}
  439. if heNet.Contains(na.IP) {
  440. bits = 36
  441. }
  442. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
  443. }