Browse Source

p2p comment prettify

pull/111/merge
Jae Kwon 10 years ago
parent
commit
bdc2711f27
5 changed files with 16 additions and 16 deletions
  1. +4
    -3
      p2p/addrbook.go
  2. +0
    -2
      p2p/listener.go
  3. +1
    -0
      p2p/peer.go
  4. +9
    -9
      p2p/peer_set.go
  5. +2
    -2
      p2p/switch.go

+ 4
- 3
p2p/addrbook.go View File

@ -615,8 +615,8 @@ func (a *AddrBook) moveToOld(ka *knownAddress) {
}
}
// doublesha256(key + sourcegroup +
// int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckes
// doublesha256( key + sourcegroup +
// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)
@ -636,7 +636,8 @@ func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
}
// doublesha256(key + group + truncate_to_64bits(doublesha256(key + addr))%buckets_per_group) % num_buckets
// doublesha256( key + group +
// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)


+ 0
- 2
p2p/listener.go View File

@ -99,8 +99,6 @@ SKIP_UPNP:
}
// Accept connections and pass on the channel
// Reading from the channel blocks on the peerHandshake for each connection
// Connection is ignored if we have too many connections to that ip range
func (l *DefaultListener) listenRoutine() {
for {
conn, err := l.listener.Accept()


+ 1
- 0
p2p/peer.go View File

@ -22,6 +22,7 @@ type Peer struct {
Data *CMap // User data.
}
// NOTE: blocking
func peerHandshake(conn net.Conn, ourNodeInfo *types.NodeInfo) (*types.NodeInfo, error) {
var peerNodeInfo = new(types.NodeInfo)
var wg sync.WaitGroup


+ 9
- 9
p2p/peer_set.go View File

@ -22,7 +22,7 @@ var (
// PeerSet is a special structure for keeping a table of peers.
// Iteration over the peers is super fast and thread-safe.
// We also track how many peers per ip range and avoid too many
// We also track how many peers per IP range and avoid too many
type PeerSet struct {
mtx sync.Mutex
lookup map[string]*peerSetItem
@ -44,7 +44,7 @@ func NewPeerSet() *PeerSet {
}
// Returns false if peer with key (uuid) is already in set
// or if we have too many peers from the peer's ip range
// or if we have too many peers from the peer's IP range
func (ps *PeerSet) Add(peer *Peer) error {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@ -52,8 +52,8 @@ func (ps *PeerSet) Add(peer *Peer) error {
return ErrSwitchDuplicatePeer
}
// ensure we havent maxed out connections for the peer's ip range yet
// and update the ip range counters
// ensure we haven't maxed out connections for the peer's IP range yet
// and update the IP range counters
if !ps.updateIPRangeCounts(peer.Host) {
return ErrSwitchMaxPeersPerIPRange
}
@ -126,9 +126,9 @@ func (ps *PeerSet) List() []*Peer {
}
//-----------------------------------------------------------------------------
// track the number of ips we're connected to for each ip address range
// track the number of IPs we're connected to for each IP address range
// forms an ip address hierarchy tree with counts
// forms an IP address hierarchy tree with counts
// the struct itself is not thread safe and should always only be accessed with the ps.mtx locked
type nestedCounter struct {
count int
@ -141,7 +141,7 @@ func NewNestedCounter() *nestedCounter {
return nc
}
// Check if we have too many ips in the ip range of the incoming connection
// Check if we have too many IPs in the IP range of the incoming connection
// Thread safe
func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) {
ps.mtx.Lock()
@ -161,7 +161,7 @@ func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) {
return false
}
// Update counts for this address' ip range
// Update counts for this address' IP range
// Returns false if we already have enough connections
// Not thread safe (only called by ps.Add())
func (ps *PeerSet) updateIPRangeCounts(address string) bool {
@ -171,7 +171,7 @@ func (ps *PeerSet) updateIPRangeCounts(address string) bool {
return updateNestedCountRecursive(c, spl, 0)
}
// recursively descend the ip hierarchy, checking if we have
// recursively descend the IP hierarchy, checking if we have
// max peers for each range and updating if not
func updateNestedCountRecursive(c *nestedCounter, ipBytes []string, index int) bool {
if index == len(ipBytes) {


+ 2
- 2
p2p/switch.go View File

@ -194,7 +194,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er
peer := newPeer(conn, peerNodeInfo, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
// Add the peer to .peers
// ignore if duplicate or if we already have too many for that ip range
// ignore if duplicate or if we already have too many for that IP range
if err := sw.peers.Add(peer); err != nil {
log.Info("Ignoring peer", "error", err, "peer", peer)
peer.stop() // will also close conn
@ -315,7 +315,7 @@ func (sw *Switch) listenerRoutine(l Listener) {
continue
}
// Ignore connections from ip ranges for which we have too many
// Ignore connections from IP ranges for which we have too many
if sw.peers.HasMaxForIPRange(inConn) {
log.Debug("Ignoring inbound connection: already have enough peers for that IP range", "address", inConn.RemoteAddr().String())
continue


Loading…
Cancel
Save