@@ -130,6 +130,7 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) {
 	// ensure that the transactions get fully broadcast to the
 	// rest of the network
 	wg := &sync.WaitGroup{}
+	var count int
 	for name, pool := range rts.mempools {
 		if !p2ptest.NodeInSlice(name, ids) {
 			continue
@@ -139,14 +140,15 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) {
 		}
 
 		wg.Add(1)
-		go func(pool *TxMempool) {
+		go func(name types.NodeID, pool *TxMempool) {
 			defer wg.Done()
 			require.Eventually(t, func() bool { return len(txs) == pool.Size() },
 				time.Minute,
 				250*time.Millisecond,
-				"ntx=%d, size=%d", len(txs), pool.Size(),
+				"node=%q, ntx=%d, size=%d", name, len(txs), pool.Size(),
 			)
-		}(pool)
+		}(name, pool)
+		count++
 	}
 	wg.Wait()
 }
@@ -196,8 +198,8 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
 }
 
 func TestReactorBroadcastTxs(t *testing.T) {
-	numTxs := 1000
-	numNodes := 10
+	numTxs := 512
+	numNodes := 4
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 