@@ -14,15 +14,15 @@ import (
 // Load generates transactions against the network until the given context is
 // canceled.
-func Load(ctx context.Context, testnet *e2e.Testnet) error {
+func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error {
 	// Since transactions are executed across all nodes in the network, we need
 	// to reduce transaction load for larger networks to avoid using too much
 	// CPU. This gives high-throughput small networks and low-throughput large ones.
 	// This also limits the number of TCP connections, since each worker has
 	// a connection to all nodes.
-	concurrency := len(testnet.Nodes) * 8
-	if concurrency > 64 {
-		concurrency = 64
+	concurrency := len(testnet.Nodes) * 2
+	if concurrency > 32 {
+		concurrency = 32
 	}
 
 	chTx := make(chan types.Tx)
@@ -38,7 +38,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error {
 	started := time.Now()
 
-	go loadGenerate(ctx, chTx, testnet.TxSize)
+	go loadGenerate(ctx, r, chTx, testnet.TxSize, len(testnet.Nodes))
 
 	for w := 0; w < concurrency; w++ {
 		go loadProcess(ctx, testnet, chTx, chSuccess)
@@ -85,7 +85,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error {
 // generation is primarily the result of backpressure from the
 // broadcast transaction, though there is still some timer-based
 // limiting.
-func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) {
+func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int64, networkSize int) {
 	timer := time.NewTimer(0)
 	defer timer.Stop()
 	defer close(chTx)
@@ -101,8 +101,8 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) {
 		// This gives a reasonable load without putting too much data in the app.
 		id := rand.Int63() % 100 // nolint: gosec
 
-		bz := make([]byte, size)
-		_, err := rand.Read(bz) // nolint: gosec
+		bz := make([]byte, txSize)
+		_, err := r.Read(bz)
 		if err != nil {
 			panic(fmt.Sprintf("Failed to read random bytes: %v", err))
 		}
@@ -114,30 +114,25 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) {
 		case chTx <- tx:
 			// sleep for a bit before sending the
 			// next transaction.
-			timer.Reset(loadGenerateWaitTime(size))
+			timer.Reset(loadGenerateWaitTime(r, networkSize))
 		}
 	}
 }
 
-func loadGenerateWaitTime(size int64) time.Duration {
+func loadGenerateWaitTime(r *rand.Rand, size int) time.Duration {
 	const (
-		min = int64(10 * time.Millisecond)
-		max = int64(100 * time.Millisecond)
+		min = int64(250 * time.Millisecond)
+		max = int64(time.Second)
 	)
 
 	var (
-		baseJitter = rand.Int63n(max-min+1) + min // nolint: gosec
-		sizeFactor = size * int64(time.Millisecond)
-		sizeJitter = rand.Int63n(sizeFactor-min+1) + min // nolint: gosec
-		waitTime   = time.Duration(baseJitter + sizeJitter)
+		baseJitter = r.Int63n(max-min+1) + min
+		sizeFactor = int64(size) * min
+		sizeJitter = r.Int63n(sizeFactor-min+1) + min
 	)
 
-	if size == 1 {
-		return waitTime / 2
-	}
-
-	return waitTime
+	return time.Duration(baseJitter + sizeJitter)
 }
 
 // loadProcess processes transactions
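
Illustration (not part of the patch): a minimal standalone sketch, assuming only the constants in the hunk above, of how the new loadGenerateWaitTime scales the per-transaction wait with network size. The base jitter is drawn from [250ms, 1s] and the size jitter from [250ms, size*250ms], so larger networks generate transactions more slowly, which together with the lower worker cap (32 instead of 64) reduces CPU and TCP load. The wrapper name waitTime, the fixed seed, and the printed bounds are added here for the example only.

// waitbounds.go: illustration only; mirrors the patched loadGenerateWaitTime
// to show how the per-transaction wait grows with network size.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// waitTime mirrors the patched loadGenerateWaitTime: a base jitter in
// [250ms, 1s] plus a size jitter in [250ms, size*250ms].
func waitTime(r *rand.Rand, size int) time.Duration {
	const (
		min = int64(250 * time.Millisecond)
		max = int64(time.Second)
	)
	baseJitter := r.Int63n(max-min+1) + min
	sizeFactor := int64(size) * min
	sizeJitter := r.Int63n(sizeFactor-min+1) + min
	return time.Duration(baseJitter + sizeJitter)
}

func main() {
	// Fixed seed for reproducibility, in the spirit of the new *rand.Rand parameter.
	r := rand.New(rand.NewSource(1))
	for _, nodes := range []int{1, 4, 16, 64} {
		upper := time.Second + time.Duration(nodes)*250*time.Millisecond
		fmt.Printf("%2d nodes: sample %v, range 500ms to %v\n", nodes, waitTime(r, nodes), upper)
	}
}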