diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go
index c118ac713..5408f2969 100644
--- a/internal/consensus/byzantine_test.go
+++ b/internal/consensus/byzantine_test.go
@@ -7,6 +7,7 @@ import (
 	"path"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -30,7 +31,11 @@ import (
 // Byzantine node sends two different prevotes (nil and blockID) to the same
 // validator.
 func TestByzantinePrevoteEquivocation(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	// empirically, this test either passes in <1s or hits some
+	// kind of deadlock and hits the larger timeout. This timeout
+	// can be extended a bunch if needed, but it's good to avoid
+	// falling back to a much coarser timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
 	config := configSetup(t)
@@ -275,12 +280,11 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	require.NoError(t, err)
 
 	for idx, ev := range evidenceFromEachValidator {
-		if assert.NotNil(t, ev, idx) {
-			ev, ok := ev.(*types.DuplicateVoteEvidence)
-			assert.True(t, ok)
-			assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
-			assert.Equal(t, prevoteHeight, ev.Height())
-		}
+		require.NotNil(t, ev, idx)
+		ev, ok := ev.(*types.DuplicateVoteEvidence)
+		require.True(t, ok)
+		assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
+		assert.Equal(t, prevoteHeight, ev.Height())
 	}
 }
 
diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go
index 898545ccf..9b3031597 100644
--- a/internal/consensus/replay_test.go
+++ b/internal/consensus/replay_test.go
@@ -73,17 +73,17 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
 		blockStore,
 	)
 
-	bytes, _ := os.ReadFile(cs.config.WalFile())
-	t.Logf("====== WAL: \n\r%X\n", bytes)
-
-	err = cs.Start(ctx)
+	bytes, err := os.ReadFile(cs.config.WalFile())
 	require.NoError(t, err)
+	require.NotNil(t, bytes)
+
+	require.NoError(t, cs.Start(ctx))
 	defer func() {
 		if err := cs.Stop(); err != nil {
 			t.Error(err)
 		}
 	}()
-
+	t.Cleanup(cs.Wait)
 	// This is just a signal that we haven't halted; its not something contained
 	// in the WAL itself. Assuming the consensus state is running, replay of any
 	// WAL, including the empty one, should eventually be followed by a new
@@ -157,8 +157,6 @@ func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusRepla
 	i := 1
 LOOP:
 	for {
-		t.Logf("====== LOOP %d\n", i)
-
 		// create consensus state from a clean slate
 		logger := log.NewNopLogger()
 		blockDB := dbm.NewMemDB()
@@ -204,8 +202,6 @@ LOOP:
 
 		select {
 		case err := <-walPanicked:
-			t.Logf("WAL panicked: %v", err)
-
 			// make sure we can make blocks after a crash
 			startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore)
 
diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go
index df795f24e..b52c41b9f 100644
--- a/internal/consensus/wal_test.go
+++ b/internal/consensus/wal_test.go
@@ -136,7 +136,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	logger := log.NewTestingLogger(t)
+	logger := log.NewNopLogger()
 	walBody, err := WALWithNBlocks(ctx, t, logger, 6)
 	if err != nil {