From bdc2711f271759441def8a7c2a423246804337a2 Mon Sep 17 00:00:00 2001
From: Jae Kwon
Date: Mon, 13 Jul 2015 13:17:47 -0700
Subject: [PATCH] p2p comment prettify

---
 p2p/addrbook.go |  7 ++++---
 p2p/listener.go |  2 --
 p2p/peer.go     |  1 +
 p2p/peer_set.go | 18 +++++++++---------
 p2p/switch.go   |  4 ++--
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/p2p/addrbook.go b/p2p/addrbook.go
index 99aa5cef..f1f17851 100644
--- a/p2p/addrbook.go
+++ b/p2p/addrbook.go
@@ -615,8 +615,8 @@ func (a *AddrBook) moveToOld(ka *knownAddress) {
 	}
 }
 
-// doublesha256(key + sourcegroup +
-// int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckes
+// doublesha256( key + sourcegroup +
+// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
 func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
 	data1 := []byte{}
 	data1 = append(data1, []byte(a.key)...)
@@ -636,7 +636,8 @@ func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
 	return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
 }
 
-// doublesha256(key + group + truncate_to_64bits(doublesha256(key + addr))%buckets_per_group) % num_buckets
+// doublesha256( key + group +
+// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
 func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
 	data1 := []byte{}
 	data1 = append(data1, []byte(a.key)...)
diff --git a/p2p/listener.go b/p2p/listener.go
index 790caa41..f69422b1 100644
--- a/p2p/listener.go
+++ b/p2p/listener.go
@@ -99,8 +99,6 @@ SKIP_UPNP:
 }
 
 // Accept connections and pass on the channel
-// Reading from the channel blocks on the peerHandshake for each connection
-// Connection is ignored if we have too many connections to that ip range
 func (l *DefaultListener) listenRoutine() {
 	for {
 		conn, err := l.listener.Accept()
diff --git a/p2p/peer.go b/p2p/peer.go
index d04434a1..493df7f1 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -22,6 +22,7 @@ type Peer struct {
 	Data *CMap // User data.
 }
 
+// NOTE: blocking
 func peerHandshake(conn net.Conn, ourNodeInfo *types.NodeInfo) (*types.NodeInfo, error) {
 	var peerNodeInfo = new(types.NodeInfo)
 	var wg sync.WaitGroup
diff --git a/p2p/peer_set.go b/p2p/peer_set.go
index a0e43475..2033529d 100644
--- a/p2p/peer_set.go
+++ b/p2p/peer_set.go
@@ -22,7 +22,7 @@ var (
 
 // PeerSet is a special structure for keeping a table of peers.
 // Iteration over the peers is super fast and thread-safe.
-// We also track how many peers per ip range and avoid too many
+// We also track how many peers per IP range and avoid too many
 type PeerSet struct {
 	mtx    sync.Mutex
 	lookup map[string]*peerSetItem
@@ -44,7 +44,7 @@ func NewPeerSet() *PeerSet {
 }
 
 // Returns false if peer with key (uuid) is already in set
-// or if we have too many peers from the peer's ip range
+// or if we have too many peers from the peer's IP range
 func (ps *PeerSet) Add(peer *Peer) error {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
@@ -52,8 +52,8 @@ func (ps *PeerSet) Add(peer *Peer) error {
 		return ErrSwitchDuplicatePeer
 	}
 
-	// ensure we havent maxed out connections for the peer's ip range yet
-	// and update the ip range counters
+	// ensure we havent maxed out connections for the peer's IP range yet
+	// and update the IP range counters
 	if !ps.updateIPRangeCounts(peer.Host) {
 		return ErrSwitchMaxPeersPerIPRange
 	}
@@ -126,9 +126,9 @@ func (ps *PeerSet) List() []*Peer {
 }
 
 //-----------------------------------------------------------------------------
-// track the number of ips we're connected to for each ip address range
+// track the number of IPs we're connected to for each IP address range
 
-// forms an ip address hierarchy tree with counts
+// forms an IP address hierarchy tree with counts
 // the struct itself is not thread safe and should always only be accessed with the ps.mtx locked
 type nestedCounter struct {
 	count int
@@ -141,7 +141,7 @@ func NewNestedCounter() *nestedCounter {
 	return nc
 }
 
-// Check if we have too many ips in the ip range of the incoming connection
+// Check if we have too many IPs in the IP range of the incoming connection
 // Thread safe
 func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
@@ -161,7 +161,7 @@ func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) {
 	return false
 }
 
-// Update counts for this address' ip range
+// Update counts for this address' IP range
 // Returns false if we already have enough connections
 // Not thread safe (only called by ps.Add())
 func (ps *PeerSet) updateIPRangeCounts(address string) bool {
@@ -171,7 +171,7 @@ func (ps *PeerSet) updateIPRangeCounts(address string) bool {
 	return updateNestedCountRecursive(c, spl, 0)
 }
 
-// recursively descend the ip hierarchy, checking if we have
+// recursively descend the IP hierarchy, checking if we have
 // max peers for each range and updating if not
 func updateNestedCountRecursive(c *nestedCounter, ipBytes []string, index int) bool {
 	if index == len(ipBytes) {
diff --git a/p2p/switch.go b/p2p/switch.go
index 497bfbee..cbd627b9 100644
--- a/p2p/switch.go
+++ b/p2p/switch.go
@@ -194,7 +194,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er
 	peer := newPeer(conn, peerNodeInfo, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
 
 	// Add the peer to .peers
-	// ignore if duplicate or if we already have too many for that ip range
+	// ignore if duplicate or if we already have too many for that IP range
 	if err := sw.peers.Add(peer); err != nil {
 		log.Info("Ignoring peer", "error", err, "peer", peer)
 		peer.stop() // will also close conn
@@ -315,7 +315,7 @@ func (sw *Switch) listenerRoutine(l Listener) {
 			continue
 		}
 
-		// Ignore connections from ip ranges for which we have too many
+		// Ignore connections from IP ranges for which we have too many
 		if sw.peers.HasMaxForIPRange(inConn) {
 			log.Debug("Ignoring inbound connection: already have enough peers for that IP range", "address", inConn.RemoteAddr().String())
 			continue
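A note for reviewers on the doublesha256 comments touched in p2p/addrbook.go: the bucket is picked with two rounds of hashing, an inner hash reduced modulo a small per-group bucket count, then an outer hash reduced modulo the total number of new buckets. The sketch below only illustrates that formula; the doubleSHA256 helper, the constants, and the group strings are assumptions for the example, not the actual addrbook.go code or values.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Hypothetical constants for illustration only; the real values live in addrbook.go.
const (
	newBucketCount         = 256
	newBucketsPerSourceGrp = 32
)

// doubleSHA256 hashes the input twice with SHA-256, matching the
// "doublesha256" shorthand in the comments.
func doubleSHA256(b []byte) []byte {
	h1 := sha256.Sum256(b)
	h2 := sha256.Sum256(h1[:])
	return h2[:]
}

// calcNewBucketSketch mirrors the commented formula:
//   doublesha256( key + sourcegroup +
//                 int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
// key, group and sourceGroup are assumed to already be strings (e.g. the
// address book key and the group strings of the address and its source).
func calcNewBucketSketch(key, group, sourceGroup string) int {
	// Inner hash: pins the address's group to one of a few buckets per source group.
	data1 := []byte(key + group + sourceGroup)
	hash1 := doubleSHA256(data1)
	hash64 := binary.BigEndian.Uint64(hash1)
	hash64 %= newBucketsPerSourceGrp

	// Outer hash: spreads the (sourcegroup, inner hash) pair over all new buckets.
	var hashBuf [8]byte
	binary.BigEndian.PutUint64(hashBuf[:], hash64)
	data2 := []byte(key + sourceGroup)
	data2 = append(data2, hashBuf[:]...)
	hash2 := doubleSHA256(data2)
	return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
}

func main() {
	fmt.Println(calcNewBucketSketch("secret-key", "203.0.113", "198.51.100"))
}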
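Similarly, the p2p/peer_set.go comments refer to a nestedCounter that forms an IP address hierarchy tree with counts, capped per range depth. The standalone sketch below shows the idea in spirit; the limits in maxPeersPerIPRange and the helper names are assumptions for the example, not the values or exact logic used by PeerSet.

package main

import (
	"fmt"
	"strings"
)

// Illustrative limits per IP-range depth (first octet, /16, /24, full address);
// the real limits are defined in p2p/peer_set.go.
var maxPeersPerIPRange = [4]int{11, 7, 5, 3}

// nestedCounter forms an IP address hierarchy tree with counts.
// Like the original, it is not thread safe on its own.
type nestedCounter struct {
	count    int
	children map[string]*nestedCounter
}

func newNestedCounter() *nestedCounter {
	return &nestedCounter{children: make(map[string]*nestedCounter)}
}

// updateCounts recursively descends the IP hierarchy, refusing the new
// connection if any range is already at its limit and bumping the counters
// otherwise (mirrors updateNestedCountRecursive in spirit).
func updateCounts(c *nestedCounter, ipBytes []string, index int) bool {
	if index == len(ipBytes) {
		return true
	}
	b := ipBytes[index]
	child, ok := c.children[b]
	if !ok {
		child = newNestedCounter()
		c.children[b] = child
	}
	if child.count >= maxPeersPerIPRange[index] {
		return false
	}
	if updateCounts(child, ipBytes, index+1) {
		child.count++
		return true
	}
	return false
}

func main() {
	root := newNestedCounter()
	fmt.Println(updateCounts(root, strings.Split("10.0.0.1", "."), 0)) // true
	fmt.Println(updateCounts(root, strings.Split("10.0.0.2", "."), 0)) // true
}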