
p2p: pass start time to flowrate and cleanup constructors (#7838)

After poking around #7828, I saw the opportunity for this cleanup, which I think
is both reasonable on its own and quite low impact, and which removes the math
around the process start time.
pull/7861/head
Sam Kleinman authored 3 years ago, committed by GitHub
commit be83ec6664
6 changed files with 41 additions and 64 deletions

  1. internal/blocksync/pool.go (+12 / -15)
  2. internal/libs/flowrate/flowrate.go (+17 / -15)
  3. internal/libs/flowrate/util.go (+2 / -11)
  4. internal/p2p/conn/connection.go (+7 / -20)
  5. internal/p2p/conn/connection_test.go (+2 / -2)
  6. internal/p2p/transport_mconn.go (+1 / -1)
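
At a glance, the core API change is that flowrate.New now takes an explicit start
time instead of relying on a package-level process start. A minimal sketch of the
new shape, assuming code living inside the tendermint module (flowrate sits under
internal/ and is not importable from outside); the durations are illustrative:

package example

import (
	"time"

	"github.com/tendermint/tendermint/internal/libs/flowrate"
)

// newMonitor shows the new constructor shape.
// Before this change: flowrate.New(time.Second, 40*time.Second)
// After: the caller supplies the reference start time explicitly.
func newMonitor(startAt time.Time) *flowrate.Monitor {
	return flowrate.New(startAt, time.Second, 40*time.Second)
}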

internal/blocksync/pool.go (+12 / -15)

@@ -331,8 +331,16 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
 		peer.base = base
 		peer.height = height
 	} else {
-		peer = newBPPeer(pool, peerID, base, height)
-		peer.logger = pool.logger.With("peer", peerID)
+		peer = &bpPeer{
+			pool:       pool,
+			id:         peerID,
+			base:       base,
+			height:     height,
+			numPending: 0,
+			logger:     pool.logger.With("peer", peerID),
+			startAt:    time.Now(),
+		}
 		pool.peers[peerID] = peer
 	}
@@ -490,24 +498,13 @@ type bpPeer struct {
 	recvMonitor *flowrate.Monitor
 	timeout     *time.Timer
+	startAt     time.Time
 	logger      log.Logger
 }
 
-func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
-	peer := &bpPeer{
-		pool:       pool,
-		id:         peerID,
-		base:       base,
-		height:     height,
-		numPending: 0,
-		logger:     log.NewNopLogger(),
-	}
-	return peer
-}
-
 func (peer *bpPeer) resetMonitor() {
-	peer.recvMonitor = flowrate.New(time.Second, time.Second*40)
+	peer.recvMonitor = flowrate.New(peer.startAt, time.Second, time.Second*40)
 	initialValue := float64(minRecvRate) * math.E
 	peer.recvMonitor.SetREMA(initialValue)
 }


internal/libs/flowrate/flowrate.go (+17 / -15)

@@ -14,11 +14,12 @@ import (
 // Monitor monitors and limits the transfer rate of a data stream.
 type Monitor struct {
-	mu      sync.Mutex    // Mutex guarding access to all internal fields
-	active  bool          // Flag indicating an active transfer
-	start   time.Duration // Transfer start time (clock() value)
-	bytes   int64         // Total number of bytes transferred
-	samples int64         // Total number of samples taken
+	mu       sync.Mutex    // Mutex guarding access to all internal fields
+	active   bool          // Flag indicating an active transfer
+	start    time.Duration // Transfer start time (clock() value)
+	pStartAt time.Time     // time of process start
+	bytes    int64         // Total number of bytes transferred
+	samples  int64         // Total number of samples taken
 
 	rSample float64 // Most recent transfer rate sample (bytes per second)
 	rEMA    float64 // Exponential moving average of rSample
@@ -45,21 +46,22 @@
 //
 // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
 // respectively.
-func New(sampleRate, windowSize time.Duration) *Monitor {
+func New(startAt time.Time, sampleRate, windowSize time.Duration) *Monitor {
 	if sampleRate = clockRound(sampleRate); sampleRate <= 0 {
 		sampleRate = 5 * clockRate
 	}
 	if windowSize <= 0 {
 		windowSize = 1 * time.Second
 	}
-	now := clock()
+	now := clock(startAt)
 	return &Monitor{
-		active:  true,
-		start:   now,
-		rWindow: windowSize.Seconds(),
-		sLast:   now,
-		sRate:   sampleRate,
-		tLast:   now,
+		active:   true,
+		start:    now,
+		rWindow:  windowSize.Seconds(),
+		sLast:    now,
+		sRate:    sampleRate,
+		tLast:    now,
+		pStartAt: startAt,
 	}
 }
@@ -129,7 +131,7 @@ func (m *Monitor) Status() Status {
 	now := m.update(0)
 	s := Status{
 		Active:   m.active,
-		Start:    clockToTime(m.start),
+		Start:    m.pStartAt.Add(m.start),
 		Duration: m.sLast - m.start,
 		Idle:     now - m.tLast,
 		Bytes:    m.bytes,
@@ -222,7 +224,7 @@ func (m *Monitor) update(n int) (now time.Duration) {
 	if !m.active {
 		return
 	}
-	if now = clock(); n > 0 {
+	if now = clock(m.pStartAt); n > 0 {
 		m.tLast = now
 	}
 	m.sBytes += int64(n)
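
A hedged usage sketch of the changed API: Status().Start is now derived from the
caller-supplied start time (m.pStartAt.Add(m.start) above) rather than from the old
package-level czero. Assumes the flowrate import shown earlier plus fmt; the byte
count and durations are illustrative:

func exampleStatus() {
	startAt := time.Now()
	m := flowrate.New(startAt, 100*time.Millisecond, time.Second)
	m.Update(1024) // record 1 KiB transferred (illustrative value)
	st := m.Status()
	// st.Start is startAt plus the monitor's coarse clock offset, so two
	// monitors created with different start times no longer share a single
	// process-wide reference point.
	fmt.Println(st.Start, st.Bytes)
}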


internal/libs/flowrate/util.go (+2 / -11)

@@ -13,18 +13,9 @@ import (
 // clockRate is the resolution and precision of clock().
 const clockRate = 20 * time.Millisecond
 
-// czero is the process start time rounded down to the nearest clockRate
-// increment.
-var czero = time.Now().Round(clockRate)
-
 // clock returns a low resolution timestamp relative to the process start time.
-func clock() time.Duration {
-	return time.Now().Round(clockRate).Sub(czero)
-}
-
-// clockToTime converts a clock() timestamp to an absolute time.Time value.
-func clockToTime(c time.Duration) time.Time {
-	return czero.Add(c)
+func clock(startAt time.Time) time.Duration {
+	return time.Now().Round(clockRate).Sub(startAt)
 }
 
 // clockRound returns d rounded to the nearest clockRate increment.


internal/p2p/conn/connection.go (+7 / -20)

@@ -134,6 +134,9 @@ type MConnConfig struct {
 	// Maximum wait time for pongs
 	PongTimeout time.Duration `mapstructure:"pong_timeout"`
+
+	// Process/Transport Start time
+	StartTime time.Time `mapstructure:",omitempty"`
 }
 
 // DefaultMConnConfig returns the default config.
@@ -145,33 +148,17 @@ func DefaultMConnConfig() MConnConfig {
 		FlushThrottle: defaultFlushThrottle,
 		PingInterval:  defaultPingInterval,
 		PongTimeout:   defaultPongTimeout,
+		StartTime:     time.Now(),
 	}
 }
 
-// NewMConnection wraps net.Conn and creates multiplex connection
+// NewMConnection wraps net.Conn and creates multiplex connection with a config
 func NewMConnection(
 	logger log.Logger,
 	conn net.Conn,
 	chDescs []*ChannelDescriptor,
 	onReceive receiveCbFunc,
 	onError errorCbFunc,
-) *MConnection {
-	return NewMConnectionWithConfig(
-		logger,
-		conn,
-		chDescs,
-		onReceive,
-		onError,
-		DefaultMConnConfig())
-}
-
-// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
-func NewMConnectionWithConfig(
-	logger log.Logger,
-	conn net.Conn,
-	chDescs []*ChannelDescriptor,
-	onReceive receiveCbFunc,
-	onError errorCbFunc,
 	config MConnConfig,
 ) *MConnection {
 	if config.PongTimeout >= config.PingInterval {
@@ -183,8 +170,8 @@ func NewMConnectionWithConfig(
 		conn:          conn,
 		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
 		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
-		sendMonitor:   flowrate.New(0, 0),
-		recvMonitor:   flowrate.New(0, 0),
+		sendMonitor:   flowrate.New(config.StartTime, 0, 0),
+		recvMonitor:   flowrate.New(config.StartTime, 0, 0),
 		send:          make(chan struct{}, 1),
 		pong:          make(chan struct{}, 1),
 		onReceive:     onReceive,
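
For callers, the practical effect is that the config, and with it the monitors'
start time, is now a required argument. A sketch only: logger, netConn, chDescs,
onReceive, onError, and nodeStartTime stand in for values the caller already has;
the config handling is taken from the diff above:

cfg := conn.DefaultMConnConfig() // StartTime now defaults to time.Now()
cfg.StartTime = nodeStartTime    // optional: anchor the monitors to an earlier recorded start

// NewMConnectionWithConfig is gone; NewMConnection takes the config directly.
mconn := conn.NewMConnection(logger, netConn, chDescs, onReceive, onError, cfg)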


internal/p2p/conn/connection_test.go (+2 / -2)

@@ -42,7 +42,7 @@ func createMConnectionWithCallbacks(
 	cfg.PingInterval = 90 * time.Millisecond
 	cfg.PongTimeout = 45 * time.Millisecond
 	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
-	c := NewMConnectionWithConfig(logger, conn, chDescs, onReceive, onError, cfg)
+	c := NewMConnection(logger, conn, chDescs, onReceive, onError, cfg)
 	return c
 }
@@ -453,7 +453,7 @@ func newClientAndServerConnsForReadErrors(
 	}
 
 	logger := log.TestingLogger()
-	mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError)
+	mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError, DefaultMConnConfig())
 	err := mconnClient.Start(ctx)
 	require.NoError(t, err)


internal/p2p/transport_mconn.go (+1 / -1)

@@ -376,7 +376,7 @@ func (c *mConnConnection) handshake(
 		return nil, types.NodeInfo{}, nil, err
 	}
 
-	mconn := conn.NewMConnectionWithConfig(
+	mconn := conn.NewMConnection(
 		c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID)),
 		secretConn,
 		c.channelDescs,

