@ -0,0 +1,83 @@ | |||
package addrbook | |||
import ( | |||
"encoding/json" | |||
"os" | |||
cmn "github.com/tendermint/tmlibs/common" | |||
) | |||
/* Loading & Saving */ | |||
// addrBookJSON is the on-disk (JSON) representation of the address
// book: the book's key plus every known address. Fields are exported
// so encoding/json can marshal them (see saveToFile/loadFromFile).
type addrBookJSON struct {
	Key   string
	Addrs []*knownAddress
}
func (a *addrBook) saveToFile(filePath string) { | |||
a.Logger.Info("Saving AddrBook to file", "size", a.Size()) | |||
a.mtx.Lock() | |||
defer a.mtx.Unlock() | |||
// Compile Addrs | |||
addrs := []*knownAddress{} | |||
for _, ka := range a.addrLookup { | |||
addrs = append(addrs, ka) | |||
} | |||
aJSON := &addrBookJSON{ | |||
Key: a.key, | |||
Addrs: addrs, | |||
} | |||
jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") | |||
if err != nil { | |||
a.Logger.Error("Failed to save AddrBook to file", "err", err) | |||
return | |||
} | |||
err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644) | |||
if err != nil { | |||
a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) | |||
} | |||
} | |||
// loadFromFile restores the address book state from the JSON file at filePath.
// Returns false if file does not exist.
// cmn.Panics if file is corrupt.
// NOTE(review): unlike saveToFile this does not take a.mtx — presumably it is
// only called during startup before concurrent access begins; confirm at call sites.
func (a *addrBook) loadFromFile(filePath string) bool {
	// If doesn't exist, do nothing.
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		return false
	}

	// Load addrBookJSON{}
	r, err := os.Open(filePath)
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
	}
	defer r.Close() // nolint: errcheck
	aJSON := &addrBookJSON{}
	dec := json.NewDecoder(r)
	err = dec.Decode(aJSON)
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
	}

	// Restore all the fields...

	// Restore the key
	a.key = aJSON.Key

	// Restore .bucketsNew & .bucketsOld
	for _, ka := range aJSON.Addrs {
		// Re-link the address into every bucket it was saved as a member of.
		for _, bucketIndex := range ka.Buckets {
			bucket := a.getBucket(ka.BucketType, bucketIndex)
			bucket[ka.Addr.String()] = ka
		}
		a.addrLookup[ka.ID()] = ka
		// Rebuild the new/old counters from the persisted bucket types.
		if ka.BucketType == bucketTypeNew {
			a.nNew++
		} else {
			a.nOld++
		}
	}
	return true
}
@ -0,0 +1,138 @@ | |||
package addrbook | |||
import "time" | |||
// knownAddress tracks information about a known network address
// that is used to determine how viable an address is.
// Fields are exported so the struct can be persisted via encoding/json
// (see addrBookJSON).
type knownAddress struct {
	Addr        *NetAddress // the address itself
	Src         *NetAddress // the peer that told us about Addr
	Attempts    int32       // connection attempts since the last success (markGood resets to 0)
	LastAttempt time.Time   // time of the most recent connection attempt
	LastSuccess time.Time   // time of the most recent successful connection (zero if never)
	BucketType  byte        // bucketTypeNew or bucketTypeOld
	Buckets     []int       // indexes of the buckets this address currently lives in
}
func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress { | |||
return &knownAddress{ | |||
Addr: addr, | |||
Src: src, | |||
Attempts: 0, | |||
LastAttempt: time.Now(), | |||
BucketType: bucketTypeNew, | |||
Buckets: nil, | |||
} | |||
} | |||
func (ka *knownAddress) ID() ID { | |||
return ka.Addr.ID | |||
} | |||
func (ka *knownAddress) copy() *knownAddress { | |||
return &knownAddress{ | |||
Addr: ka.Addr, | |||
Src: ka.Src, | |||
Attempts: ka.Attempts, | |||
LastAttempt: ka.LastAttempt, | |||
LastSuccess: ka.LastSuccess, | |||
BucketType: ka.BucketType, | |||
Buckets: ka.Buckets, | |||
} | |||
} | |||
func (ka *knownAddress) isOld() bool { | |||
return ka.BucketType == bucketTypeOld | |||
} | |||
func (ka *knownAddress) isNew() bool { | |||
return ka.BucketType == bucketTypeNew | |||
} | |||
func (ka *knownAddress) markAttempt() { | |||
now := time.Now() | |||
ka.LastAttempt = now | |||
ka.Attempts += 1 | |||
} | |||
func (ka *knownAddress) markGood() { | |||
now := time.Now() | |||
ka.LastAttempt = now | |||
ka.Attempts = 0 | |||
ka.LastSuccess = now | |||
} | |||
func (ka *knownAddress) addBucketRef(bucketIdx int) int { | |||
for _, bucket := range ka.Buckets { | |||
if bucket == bucketIdx { | |||
// TODO refactor to return error? | |||
// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) | |||
return -1 | |||
} | |||
} | |||
ka.Buckets = append(ka.Buckets, bucketIdx) | |||
return len(ka.Buckets) | |||
} | |||
func (ka *knownAddress) removeBucketRef(bucketIdx int) int { | |||
buckets := []int{} | |||
for _, bucket := range ka.Buckets { | |||
if bucket != bucketIdx { | |||
buckets = append(buckets, bucket) | |||
} | |||
} | |||
if len(buckets) != len(ka.Buckets)-1 { | |||
// TODO refactor to return error? | |||
// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) | |||
return -1 | |||
} | |||
ka.Buckets = buckets | |||
return len(ka.Buckets) | |||
} | |||
/* | |||
An address is bad if the address in question is a New address, has not been tried in the last | |||
minute, and meets one of the following criteria: | |||
1) It claims to be from the future | |||
2) It hasn't been seen in over a week | |||
3) It has failed at least three times and never succeeded | |||
4) It has failed ten times in the last week | |||
All addresses that meet these criteria are assumed to be worthless and not | |||
worth keeping hold of. | |||
XXX: so a good peer needs us to call MarkGood before the conditions above are reached! | |||
*/ | |||
func (ka *knownAddress) isBad() bool { | |||
// Is Old --> good | |||
if ka.BucketType == bucketTypeOld { | |||
return false | |||
} | |||
// Has been attempted in the last minute --> good | |||
if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) { | |||
return false | |||
} | |||
// Too old? | |||
// XXX: does this mean if we've kept a connection up for this long we'll disconnect?! | |||
// and shouldn't it be .Before ? | |||
if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { | |||
return true | |||
} | |||
// Never succeeded? | |||
if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { | |||
return true | |||
} | |||
// Hasn't succeeded in too long? | |||
// XXX: does this mean if we've kept a connection up for this long we'll disconnect?! | |||
if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && | |||
ka.Attempts >= maxFailures { | |||
return true | |||
} | |||
return false | |||
} |
@ -0,0 +1,55 @@ | |||
package addrbook | |||
import "time" | |||
const (
	// number of addresses under which the address manager will claim to
	// need more addresses.
	needAddressThreshold = 1000

	// interval used to dump the address cache to disk for future use.
	dumpAddressInterval = time.Minute * 2

	// max addresses in each old address bucket.
	oldBucketSize = 64

	// number of buckets we split old addresses over.
	oldBucketCount = 64

	// max addresses in each new address bucket.
	newBucketSize = 64

	// number of buckets that we spread new addresses over.
	newBucketCount = 256

	// number of old buckets over which an address group will be spread.
	oldBucketsPerGroup = 4

	// number of new buckets over which a source address group will be spread.
	newBucketsPerGroup = 32

	// max number of buckets a frequently seen new address may end up in.
	maxNewBucketsPerAddress = 4

	// days before which we assume an address has vanished
	// if we have not seen it announced in that long.
	numMissingDays = 7

	// tries without a single success before we assume an address is bad.
	numRetries = 3

	// max failures we will accept without a success before considering
	// an address bad.
	maxFailures = 10 // TODO(review): value chosen without justification; revisit.

	// days since the last success before we will consider evicting an address.
	minBadDays = 7

	// % of total addresses known returned by GetSelection.
	getSelectionPercent = 23

	// min addresses that must be returned by GetSelection. Useful for bootstrapping.
	minGetSelection = 32

	// max addresses returned by GetSelection
	// NOTE: this must match "maxPexMessageSize"
	maxGetSelection = 250
)