
testing: logger cleanup (#8153)

This contains two major changes:

- Remove the legacy test logging method and explicitly call the noop
  logger instead. This is just to make the test logging behavior more
  coherent and clear.
  
- Move the logging in the light package from the testing.T logger to
  the noop logger. In practice we very rarely need or want to read
  test logs unless we're reproducing a failure and running a narrow
  set of tests.
  
In most cases, I (for one) prefer to run in verbose mode so I can
watch the progress of tests, but I basically never need to read the
logs. If I do want to see logs, I can swap in the testing.T logger
locally, which is what you have to do today anyway (see the sketch
below).
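
For illustration, here is a minimal sketch of the swap in a test (the
test name is made up; `NewNopLogger` and `NewTestingLogger` are the
constructors from libs/log used in the diff below):

package example_test

import (
	"testing"

	"github.com/tendermint/tendermint/libs/log"
)

func TestWithNopLogger(t *testing.T) {
	// Default after this change: test log output is discarded.
	logger := log.NewNopLogger()

	// To see logs while debugging a narrow set of tests, swap in the
	// testing.T-backed logger locally:
	//
	//     logger = log.NewTestingLogger(t)

	logger.Info("only visible if the testing logger is swapped in")
}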
pull/8163/head
Sam Kleinman, 3 years ago (committed by GitHub)
commit 0bded371c5
34 changed files with 160 additions and 169 deletions
  1. internal/blocksync/pool_test.go (+3, -3)
  2. internal/blocksync/reactor_test.go (+3, -3)
  3. internal/consensus/byzantine_test.go (+3, -3)
  4. internal/consensus/common_test.go (+1, -1)
  5. internal/consensus/mempool_test.go (+5, -5)
  6. internal/consensus/pbts_test.go (+1, -1)
  7. internal/consensus/peer_state_test.go (+1, -1)
  8. internal/consensus/reactor_test.go (+3, -3)
  9. internal/consensus/replay_test.go (+4, -4)
  10. internal/consensus/wal_generator.go (+1, -1)
  11. internal/consensus/wal_test.go (+3, -3)
  12. internal/eventbus/event_bus_test.go (+8, -8)
  13. internal/evidence/reactor_test.go (+1, -1)
  14. internal/evidence/verify_test.go (+1, -1)
  15. internal/inspect/inspect_test.go (+12, -12)
  16. internal/libs/autofile/group_test.go (+7, -7)
  17. internal/mempool/mempool_test.go (+1, -1)
  18. internal/p2p/conn/connection_test.go (+10, -10)
  19. internal/p2p/p2ptest/network.go (+1, -1)
  20. internal/p2p/pex/reactor_test.go (+2, -2)
  21. internal/p2p/router_filter_test.go (+1, -1)
  22. internal/p2p/router_test.go (+10, -10)
  23. internal/p2p/transport_mconn_test.go (+4, -4)
  24. internal/p2p/transport_memory_test.go (+1, -1)
  25. internal/proxy/client_test.go (+5, -5)
  26. internal/pubsub/example_test.go (+1, -1)
  27. internal/pubsub/pubsub_test.go (+14, -14)
  28. internal/state/execution_test.go (+15, -15)
  29. internal/state/indexer/indexer_service_test.go (+1, -1)
  30. internal/state/validation_test.go (+4, -4)
  31. libs/log/testing.go (+9, -18)
  32. light/client_test.go (+14, -14)
  33. light/detector_test.go (+7, -7)
  34. light/light_test.go (+3, -3)

internal/blocksync/pool_test.go (+3, -3)

@ -86,7 +86,7 @@ func TestBlockPoolBasic(t *testing.T) {
peers := makePeers(10, start+1, 1000)
errorsCh := make(chan peerError, 1000)
requestsCh := make(chan BlockRequest, 1000)
pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh)
pool := NewBlockPool(log.NewNopLogger(), start, requestsCh, errorsCh)
if err := pool.Start(ctx); err != nil {
t.Error(err)
@ -138,7 +138,7 @@ func TestBlockPoolTimeout(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
start := int64(42)
peers := makePeers(10, start+1, 1000)
@ -207,7 +207,7 @@ func TestBlockPoolRemovePeer(t *testing.T) {
requestsCh := make(chan BlockRequest)
errorsCh := make(chan peerError)
pool := NewBlockPool(log.TestingLogger(), 1, requestsCh, errorsCh)
pool := NewBlockPool(log.NewNopLogger(), 1, requestsCh, errorsCh)
err := pool.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); pool.Wait() })


internal/blocksync/reactor_test.go (+3, -3)

@ -62,7 +62,7 @@ func setup(
"must specify at least one block height (nodes)")
rts := &reactorTestSuite{
logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()),
logger: log.NewNopLogger().With("module", "block_sync", "testCase", t.Name()),
network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
@ -108,7 +108,7 @@ func (rts *reactorTestSuite) addNode(
) {
t.Helper()
logger := log.TestingLogger()
logger := log.NewNopLogger()
rts.nodes = append(rts.nodes, nodeID)
rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics())
@ -139,7 +139,7 @@ func (rts *reactorTestSuite) addNode(
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
log.NewNopLogger(),
rts.app[nodeID],
mp,
sm.EmptyEvidencePool{},


internal/consensus/byzantine_test.go (+3, -3)

@ -79,7 +79,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// Make Mempool
mempool := mempool.NewTxMempool(
log.TestingLogger().With("module", "mempool"),
log.NewNopLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
)
@ -87,7 +87,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
mempool.EnableTxsAvailable()
}
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
eventBus := eventbus.NewDefault(log.NewNopLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
// Make a full instance of the evidence pool
@ -95,7 +95,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
require.NoError(t, err)
// set private validator


internal/consensus/common_test.go (+1, -1)

@ -777,7 +777,7 @@ func ensureMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, to tim
// consensusLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func consensusLogger() log.Logger {
return log.TestingLogger().With("module", "consensus")
return log.NewNopLogger().With("module", "consensus")
}
func makeConsensusState(


internal/consensus/mempool_test.go (+5, -5)

@ -43,7 +43,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(t, cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
@ -70,7 +70,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(t, cs.txNotifier).EnableTxsAvailable()
@ -95,7 +95,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(t, cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
@ -142,7 +142,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
defer cancel()
config := configSetup(t)
logger := log.TestingLogger()
logger := log.NewNopLogger()
state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{
Validators: 1,
Power: 10})
@ -184,7 +184,7 @@ func TestMempoolRmBadTx(t *testing.T) {
app := NewCounterApplication()
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(ctx, t, log.TestingLogger(), config, state, privVals[0], app, blockStore)
cs := newStateWithConfigAndBlockStore(ctx, t, log.NewNopLogger(), config, state, privVals[0], app, blockStore)
err := stateStore.Save(state)
require.NoError(t, err)


internal/consensus/pbts_test.go (+1, -1)

@ -114,7 +114,7 @@ func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfigurat
Time: tc.genesisTime,
Validators: validators,
})
cs := newState(ctx, t, log.TestingLogger(), state, privVals[0], kvstore.NewApplication())
cs := newState(ctx, t, log.NewNopLogger(), state, privVals[0], kvstore.NewApplication())
vss := make([]*validatorStub, validators)
for i := 0; i < validators; i++ {
vss[i] = newValidatorStub(privVals[i], int32(i))


internal/consensus/peer_state_test.go (+1, -1)

@ -10,7 +10,7 @@ import (
)
func peerStateSetup(h, r, v int) *PeerState {
ps := NewPeerState(log.TestingLogger(), "testPeerState")
ps := NewPeerState(log.NewNopLogger(), "testPeerState")
ps.PRS.Height = int64(h)
ps.PRS.Round = int32(r)
ps.ensureVoteBitArrays(int64(h), v)


internal/consensus/reactor_test.go (+3, -3)

@ -478,7 +478,7 @@ func TestReactorWithEvidence(t *testing.T) {
proxyAppConnCon := abciclient.NewLocalClient(logger, app)
mempool := mempool.NewTxMempool(
log.TestingLogger().With("module", "mempool"),
log.NewNopLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
)
@ -501,10 +501,10 @@ func TestReactorWithEvidence(t *testing.T) {
evpool2 := sm.EmptyEvidencePool{}
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
eventBus := eventbus.NewDefault(log.NewNopLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus)


internal/consensus/replay_test.go (+4, -4)

@ -55,7 +55,7 @@ import (
func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(t, consensusReplayConfig)
@ -682,7 +682,7 @@ func testHandshakeReplay(
cfg := sim.Config
logger := log.TestingLogger()
logger := log.NewNopLogger()
if testValidatorsChange {
testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_m", t.Name(), mode))
require.NoError(t, err)
@ -819,7 +819,7 @@ func applyBlock(
eventBus *eventbus.EventBus,
) sm.State {
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mempool, evpool, blockStore, eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mempool, evpool, blockStore, eventBus)
bps, err := blk.MakePartSet(testPartSize)
require.NoError(t, err)
@ -964,7 +964,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
store.chain = blocks
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))


internal/consensus/wal_generator.go (+1, -1)

@ -80,7 +80,7 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp, mempool, evpool, blockStore, eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp, mempool, evpool, blockStore, eventBus)
consensusState, err := NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
if err != nil {
t.Fatal(err)


internal/consensus/wal_test.go (+3, -3)

@ -26,7 +26,7 @@ const walTestFlushInterval = 100 * time.Millisecond
func TestWALTruncate(t *testing.T) {
walDir := t.TempDir()
walFile := filepath.Join(walDir, "wal")
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -108,7 +108,7 @@ func TestWALWrite(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wal, err := NewWAL(ctx, log.TestingLogger(), walFile)
wal, err := NewWAL(ctx, log.NewNopLogger(), walFile)
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
@ -177,7 +177,7 @@ func TestWALPeriodicSync(t *testing.T) {
walFile := filepath.Join(walDir, "wal")
defer os.RemoveAll(walFile)
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond))
wal, err := NewWAL(ctx, log.NewNopLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond))
require.NoError(t, err)
wal.SetFlushInterval(walTestFlushInterval)


internal/eventbus/event_bus_test.go (+8, -8)

@ -22,7 +22,7 @@ func TestEventBusPublishEventTx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -75,7 +75,7 @@ func TestEventBusPublishEventTx(t *testing.T) {
func TestEventBusPublishEventNewBlock(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -129,7 +129,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -247,7 +247,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -297,7 +297,7 @@ func TestEventBusPublishEventEvidenceValidated(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -339,7 +339,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -381,7 +381,7 @@ func TestEventBusPublish(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger())
eventBus := eventbus.NewDefault(log.NewNopLogger())
err := eventBus.Start(ctx)
require.NoError(t, err)
@ -467,7 +467,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
eventBus := eventbus.NewDefault(log.TestingLogger()) // set buffer capacity to 0 so we are not testing cache
eventBus := eventbus.NewDefault(log.NewNopLogger()) // set buffer capacity to 0 so we are not testing cache
err := eventBus.Start(ctx)
if err != nil {
b.Error(err)


internal/evidence/reactor_test.go (+1, -1)

@ -56,7 +56,7 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint
numStateStores := len(stateStores)
rts := &reactorTestSuite{
numStateStores: numStateStores,
logger: log.TestingLogger().With("testCase", t.Name()),
logger: log.NewNopLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}),
reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores),
pools: make(map[types.NodeID]*evidence.Pool, numStateStores),


internal/evidence/verify_test.go (+1, -1)

@ -98,7 +98,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
pool := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
pool := evidence.NewPool(log.NewNopLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
evList := types.EvidenceList{ev}
// check that the evidence pool correctly verifies the evidence


internal/inspect/inspect_test.go (+12, -12)

@ -31,7 +31,7 @@ import (
func TestInspectConstructor(t *testing.T) {
cfg, err := config.ResetTestRoot(t.TempDir(), "test")
require.NoError(t, err)
testLogger := log.TestingLogger()
testLogger := log.NewNopLogger()
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
@ -47,7 +47,7 @@ func TestInspectRun(t *testing.T) {
cfg, err := config.ResetTestRoot(t.TempDir(), "test")
require.NoError(t, err)
testLogger := log.TestingLogger()
testLogger := log.NewNopLogger()
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
@ -85,7 +85,7 @@ func TestBlock(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
@ -132,7 +132,7 @@ func TestTxSearch(t *testing.T) {
Return([]*abcitypes.TxResult{testTxResult}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
@ -179,7 +179,7 @@ func TestTx(t *testing.T) {
}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
@ -227,7 +227,7 @@ func TestConsensusParams(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -280,7 +280,7 @@ func TestBlockResults(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -328,7 +328,7 @@ func TestCommit(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -382,7 +382,7 @@ func TestBlockByHash(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -435,7 +435,7 @@ func TestBlockchain(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -488,7 +488,7 @@ func TestValidators(t *testing.T) {
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock"))
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
@ -547,7 +547,7 @@ func TestBlockSearch(t *testing.T) {
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
Return([]int64{testHeight}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
l := log.NewNopLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())


internal/libs/autofile/group_test.go (+7, -7)

@ -47,7 +47,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 1000*1000)
@ -116,7 +116,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
}
func TestRotateFile(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -181,7 +181,7 @@ func TestRotateFile(t *testing.T) {
}
func TestWrite(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -209,7 +209,7 @@ func TestWrite(t *testing.T) {
// test that Read reads the required amount of bytes from all the files in the
// group and returns no error if n == size of the given slice.
func TestGroupReaderRead(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -247,7 +247,7 @@ func TestGroupReaderRead(t *testing.T) {
// test that Read returns an error if number of bytes read < size of
// the given slice. Subsequent call should return 0, io.EOF.
func TestGroupReaderRead2(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -287,7 +287,7 @@ func TestGroupReaderRead2(t *testing.T) {
}
func TestMinIndex(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -300,7 +300,7 @@ func TestMinIndex(t *testing.T) {
}
func TestMaxIndex(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()


internal/mempool/mempool_test.go (+1, -1)

@ -75,7 +75,7 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
func setup(ctx context.Context, t testing.TB, app abciclient.Client, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()
logger := log.TestingLogger()
logger := log.NewNopLogger()
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)


internal/p2p/conn/connection_test.go (+10, -10)

@ -54,7 +54,7 @@ func TestMConnectionSendFlushStop(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clientConn := createTestMConnection(log.TestingLogger(), client)
clientConn := createTestMConnection(log.NewNopLogger(), client)
err := clientConn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(clientConn))
@ -91,7 +91,7 @@ func TestMConnectionSend(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createTestMConnection(log.TestingLogger(), client)
mconn := createTestMConnection(log.NewNopLogger(), client)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -133,7 +133,7 @@ func TestMConnectionReceive(t *testing.T) {
case <-ctx.Done():
}
}
logger := log.TestingLogger()
logger := log.NewNopLogger()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -168,7 +168,7 @@ func TestMConnectionWillEventuallyTimeout(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, nil, nil)
mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, nil, nil)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -224,7 +224,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -282,7 +282,7 @@ func TestMConnectionMultiplePings(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -339,7 +339,7 @@ func TestMConnectionPingPongs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -398,7 +398,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))
@ -433,7 +433,7 @@ func newClientAndServerConnsForReadErrors(
{ID: 0x01, Priority: 1, SendQueueCapacity: 1},
{ID: 0x02, Priority: 1, SendQueueCapacity: 1},
}
logger := log.TestingLogger()
logger := log.NewNopLogger()
mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError, DefaultMConnConfig())
err := mconnClient.Start(ctx)
@ -563,7 +563,7 @@ func TestMConnectionTrySend(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mconn := createTestMConnection(log.TestingLogger(), client)
mconn := createTestMConnection(log.NewNopLogger(), client)
err := mconn.Start(ctx)
require.NoError(t, err)
t.Cleanup(waitAll(mconn))


internal/p2p/p2ptest/network.go (+1, -1)

@ -50,7 +50,7 @@ func (opts *NetworkOptions) setDefaults() {
// connects them to each other.
func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network {
opts.setDefaults()
logger := log.TestingLogger()
logger := log.NewNopLogger()
network := &Network{
Nodes: map[types.NodeID]*Node{},
logger: logger,


internal/p2p/pex/reactor_test.go (+2, -2)

@ -303,7 +303,7 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
return pexCh, nil
}
reactor, err := pex.NewReactor(ctx, log.TestingLogger(), peerManager, chCreator, peerUpdates)
reactor, err := pex.NewReactor(ctx, log.NewNopLogger(), peerManager, chCreator, peerUpdates)
require.NoError(t, err)
require.NoError(t, reactor.Start(ctx))
@ -365,7 +365,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT
realNodes := opts.TotalNodes - opts.MockNodes
rts := &reactorTestSuite{
logger: log.TestingLogger().With("testCase", t.Name()),
logger: log.NewNopLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(ctx, t, networkOpts),
reactors: make(map[types.NodeID]*pex.Reactor, realNodes),
pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),


internal/p2p/router_filter_test.go (+1, -1)

@ -15,7 +15,7 @@ import (
func TestConnectionFiltering(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
filterByIPCount := 0
router := &Router{


internal/p2p/router_test.go (+10, -10)

@ -107,7 +107,7 @@ func TestRouter_Channel_Basic(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -410,7 +410,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -465,7 +465,7 @@ func TestRouter_AcceptPeers_Error(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -503,7 +503,7 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -555,7 +555,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -659,7 +659,7 @@ func TestRouter_DialPeers(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -745,7 +745,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -820,7 +820,7 @@ func TestRouter_EvictPeers(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -883,7 +883,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,
@ -939,7 +939,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
router, err := p2p.NewRouter(
ctx,
log.TestingLogger(),
log.NewNopLogger(),
p2p.NopMetrics(),
selfInfo,
selfKey,


internal/p2p/transport_mconn_test.go (+4, -4)

@ -20,7 +20,7 @@ import (
func init() {
testTransports["mconn"] = func(t *testing.T) p2p.Transport {
transport := p2p.NewMConnTransport(
log.TestingLogger(),
log.NewNopLogger(),
conn.DefaultMConnConfig(),
[]*p2p.ChannelDescriptor{{ID: chID, Priority: 1}},
p2p.MConnTransportOptions{},
@ -40,7 +40,7 @@ func init() {
func TestMConnTransport_AcceptBeforeListen(t *testing.T) {
transport := p2p.NewMConnTransport(
log.TestingLogger(),
log.NewNopLogger(),
conn.DefaultMConnConfig(),
[]*p2p.ChannelDescriptor{{ID: chID, Priority: 1}},
p2p.MConnTransportOptions{
@ -63,7 +63,7 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) {
defer cancel()
transport := p2p.NewMConnTransport(
log.TestingLogger(),
log.NewNopLogger(),
conn.DefaultMConnConfig(),
[]*p2p.ChannelDescriptor{{ID: chID, Priority: 1}},
p2p.MConnTransportOptions{
@ -153,7 +153,7 @@ func TestMConnTransport_Listen(t *testing.T) {
t.Cleanup(leaktest.Check(t))
transport := p2p.NewMConnTransport(
log.TestingLogger(),
log.NewNopLogger(),
conn.DefaultMConnConfig(),
[]*p2p.ChannelDescriptor{{ID: chID, Priority: 1}},
p2p.MConnTransportOptions{},


internal/p2p/transport_memory_test.go (+1, -1)

@ -19,7 +19,7 @@ func init() {
testTransports["memory"] = func(t *testing.T) p2p.Transport {
if network == nil {
network = p2p.NewMemoryNetwork(log.TestingLogger(), 1)
network = p2p.NewMemoryNetwork(log.NewNopLogger(), 1)
}
i := byte(network.Size())
nodeID, err := types.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20)))


internal/proxy/client_test.go (+5, -5)

@ -58,7 +58,7 @@ var SOCKET = "socket"
func TestEcho(t *testing.T) {
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
logger := log.TestingLogger()
logger := log.NewNopLogger()
client, err := abciclient.NewClient(logger, sockPath, SOCKET, true)
if err != nil {
t.Fatal(err)
@ -98,7 +98,7 @@ func TestEcho(t *testing.T) {
func BenchmarkEcho(b *testing.B) {
b.StopTimer() // Initialize
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
logger := log.TestingLogger()
logger := log.NewNopLogger()
client, err := abciclient.NewClient(logger, sockPath, SOCKET, true)
if err != nil {
b.Fatal(err)
@ -146,7 +146,7 @@ func TestInfo(t *testing.T) {
defer cancel()
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
logger := log.TestingLogger()
logger := log.NewNopLogger()
client, err := abciclient.NewClient(logger, sockPath, SOCKET, true)
if err != nil {
t.Fatal(err)
@ -189,7 +189,7 @@ func TestAppConns_Start_Stop(t *testing.T) {
clientMock.On("Wait").Return(nil).Times(1)
cl := &noopStoppableClientImpl{Client: clientMock}
appConns := New(cl, log.TestingLogger(), NopMetrics())
appConns := New(cl, log.NewNopLogger(), NopMetrics())
err := appConns.Start(ctx)
require.NoError(t, err)
@ -219,7 +219,7 @@ func TestAppConns_Failure(t *testing.T) {
clientMock.On("Error").Return(errors.New("EOF"))
cl := &noopStoppableClientImpl{Client: clientMock}
appConns := New(cl, log.TestingLogger(), NopMetrics())
appConns := New(cl, log.NewNopLogger(), NopMetrics())
err := appConns.Start(ctx)
require.NoError(t, err)


internal/pubsub/example_test.go (+1, -1)

@ -16,7 +16,7 @@ func TestExample(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := newTestServer(ctx, t, log.TestingLogger())
s := newTestServer(ctx, t, log.NewNopLogger())
sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
ClientID: "example-client",


internal/pubsub/pubsub_test.go (+14, -14)

@ -30,7 +30,7 @@ func TestSubscribeWithArgs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
t.Run("DefaultLimit", func(t *testing.T) {
@ -59,7 +59,7 @@ func TestSubscribeWithArgs(t *testing.T) {
func TestObserver(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
@ -81,7 +81,7 @@ func TestObserverErrors(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
@ -94,7 +94,7 @@ func TestPublishDoesNotBlock(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
@ -124,7 +124,7 @@ func TestSubscribeErrors(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
t.Run("NegativeLimitErr", func(t *testing.T) {
@ -141,7 +141,7 @@ func TestSlowSubscriber(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@ -163,7 +163,7 @@ func TestDifferentClients(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@ -218,7 +218,7 @@ func TestSubscribeDuplicateKeys(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
testCases := []struct {
@ -274,7 +274,7 @@ func TestClientSubscribesTwice(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
q := query.MustCompile(`tm.events.type='NewBlock'`)
@ -310,7 +310,7 @@ func TestUnsubscribe(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@ -335,7 +335,7 @@ func TestClientUnsubscribesTwice(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@ -357,7 +357,7 @@ func TestResubscribe(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
args := pubsub.SubscribeArgs{
@ -381,7 +381,7 @@ func TestUnsubscribeAll(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := newTestServer(ctx, t, logger)
sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@ -402,7 +402,7 @@ func TestUnsubscribeAll(t *testing.T) {
}
func TestBufferCapacity(t *testing.T) {
logger := log.TestingLogger()
logger := log.NewNopLogger()
s := pubsub.NewServer(logger, pubsub.BufferCapacity(2))
require.Equal(t, 2, s.BufferCapacity())


internal/state/execution_test.go (+15, -15)

@ -38,7 +38,7 @@ var (
func TestApplyBlock(t *testing.T) {
app := &testApp{}
logger := log.TestingLogger()
logger := log.NewNopLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
@ -85,7 +85,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
app := &testApp{}
cc := abciclient.NewLocalClient(logger, app)
appClient := proxy.New(cc, logger, proxy.NopMetrics())
@ -128,7 +128,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mp, evpool, blockStore, eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mp, evpool, blockStore, eventBus)
state, _, lastCommit := makeAndCommitGoodBlock(ctx, t, state, 1, new(types.Commit), state.NextValidators.Validators[0].Address, blockExec, privVals, nil)
for idx, isAbsent := range tc.absentCommitSigs {
@ -160,7 +160,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) {
defer cancel()
app := &testApp{}
logger := log.TestingLogger()
logger := log.NewNopLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
@ -252,7 +252,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) {
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp,
blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp,
mp, evpool, blockStore, eventBus)
block := sf.MakeBlock(state, 1, new(types.Commit))
@ -277,7 +277,7 @@ func TestProcessProposal(t *testing.T) {
defer cancel()
app := abcimocks.NewBaseMock()
logger := log.TestingLogger()
logger := log.NewNopLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
@ -482,7 +482,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) {
defer cancel()
app := &testApp{}
logger := log.TestingLogger()
logger := log.NewNopLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
@ -565,7 +565,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
defer cancel()
app := &testApp{}
logger := log.TestingLogger()
logger := log.NewNopLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
@ -579,7 +579,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
log.NewNopLogger(),
proxyApp,
new(mpmocks.Mempool),
sm.EmptyEvidencePool{},
@ -609,7 +609,7 @@ func TestEmptyPrepareProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
@ -657,7 +657,7 @@ func TestPrepareProposalPanicOnInvalid(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
@ -716,7 +716,7 @@ func TestPrepareProposalRemoveTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
@ -777,7 +777,7 @@ func TestPrepareProposalAddedTxsIncluded(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
@ -836,7 +836,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
@ -894,7 +894,7 @@ func TestPrepareProposalModifiedTxFalse(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))


internal/state/indexer/indexer_service_test.go (+1, -1)

@ -43,7 +43,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := tmlog.TestingLogger()
logger := tmlog.NewNopLogger()
// event bus
eventBus := eventbus.NewDefault(logger)
err := eventBus.Start(ctx)


internal/state/validation_test.go (+4, -4)

@ -33,7 +33,7 @@ const validationTestsStopHeight int64 = 10
func TestValidateBlockHeader(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
@ -136,7 +136,7 @@ func TestValidateBlockCommit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
@ -277,7 +277,7 @@ func TestValidateBlockEvidence(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
logger := log.NewNopLogger()
proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
@ -309,7 +309,7 @@ func TestValidateBlockEvidence(t *testing.T) {
state.ConsensusParams.Evidence.MaxBytes = 1000
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
log.NewNopLogger(),
proxyApp,
mp,
evpool,


libs/log/testing.go (+9, -18)

@ -6,24 +6,6 @@ import (
"github.com/rs/zerolog"
)
// TestingLogger was a legacy constructor that wrote logging output to
// standardoutput when in verbose mode, and no-op'ed test logs
// otherwise. Now it always no-ops, but if you need to see logs from
// tests, you can replace this call with `NewTestingLogger`
// constructor.
func TestingLogger() Logger {
return NewNopLogger()
}
type testingWriter struct {
t testing.TB
}
func (tw testingWriter) Write(in []byte) (int, error) {
tw.t.Log(string(in))
return len(in), nil
}
// NewTestingLogger converts a testing.T into a logging interface to
// make test failures and verbose provide better feedback associated
// with test failures. This logging instance is safe for use from
@ -58,3 +40,12 @@ func NewTestingLoggerWithLevel(t testing.TB, level string) Logger {
Logger: zerolog.New(newSyncWriter(testingWriter{t})).Level(logLevel),
}
}
type testingWriter struct {
t testing.TB
}
func (tw testingWriter) Write(in []byte) (int, error) {
tw.t.Log(string(in))
return len(in), nil
}
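
The constructors kept above still adapt testing.TB to an io.Writer for
zerolog, so a test can opt back into visible output. A minimal sketch
(hypothetical test; it assumes the NewTestingLoggerWithLevel constructor
shown in the hunk, with a zerolog level string):

package example_test

import (
	"testing"

	"github.com/tendermint/tendermint/libs/log"
)

func TestWithVisibleLogs(t *testing.T) {
	// Routes log output through t.Log at debug level, so it shows up with
	// `go test -v` and is attached to the test on failure.
	logger := log.NewTestingLoggerWithLevel(t, "debug")
	logger.Debug("replaying wal", "height", 1)
}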

light/client_test.go (+14, -14)

@ -224,7 +224,7 @@ func TestClient(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
@ -351,7 +351,7 @@ func TestClient(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(bctx)
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
@ -466,7 +466,7 @@ func TestClient(t *testing.T) {
t.Run("Cleanup", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
@ -503,7 +503,7 @@ func TestClient(t *testing.T) {
ctx, cancel := context.WithCancel(bctx)
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockNode := &provider_mocks.Provider{}
trustedStore := dbs.New(dbm.NewMemDB())
@ -538,7 +538,7 @@ func TestClient(t *testing.T) {
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// header1 != h1
header1 := keys.GenSignedHeader(t, chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals,
@ -584,7 +584,7 @@ func TestClient(t *testing.T) {
mockWitnessNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
mockWitnessNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
c, err := light.NewClient(
ctx,
@ -611,7 +611,7 @@ func TestClient(t *testing.T) {
t.Run("Concurrency", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil)
@ -664,7 +664,7 @@ func TestClient(t *testing.T) {
1: h1,
2: h2,
}, valSet)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
c, err := light.NewClient(
ctx,
@ -705,7 +705,7 @@ func TestClient(t *testing.T) {
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)
mockDeadNode.On("ID").Return("mockDeadNode")
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
c, err := light.NewClient(
ctx,
@ -738,7 +738,7 @@ func TestClient(t *testing.T) {
mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil)
mockFullNode.On("ID").Return("mockFullNode")
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
mockDeadNode1 := &provider_mocks.Provider{}
mockDeadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
@ -770,7 +770,7 @@ func TestClient(t *testing.T) {
t.Run("BackwardsVerification", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
{
headers, vals, _ := genLightBlocksWithKeys(t, chainID, 9, 3, 0, bTime)
@ -886,7 +886,7 @@ func TestClient(t *testing.T) {
mockWitness.AssertExpectations(t)
})
t.Run("RemovesWitnessIfItSendsUsIncorrectHeader", func(t *testing.T) {
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// different headers hash then primary plus less than 1/3 signed (no fork)
headers1 := map[int64]*types.SignedHeader{
@ -959,7 +959,7 @@ func TestClient(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
differentVals, _ := factory.ValidatorSet(ctx, t, 10, 100)
mockBadValSetNode := mockNodeFromHeadersAndVals(
@ -1043,7 +1043,7 @@ func TestClient(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
c, err := light.NewClient(
ctx,


light/detector_test.go (+7, -7)

@ -21,7 +21,7 @@ import (
)
func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// primary performs a lunatic attack
var (
@ -144,7 +144,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) {
ctx, cancel := context.WithCancel(bctx)
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// primary performs an equivocation attack
var (
@ -248,7 +248,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(t, chainID, latestHeight, valSize, 2, bTime)
for _, unusedHeader := range []int64{3, 5, 6, 8} {
@ -411,7 +411,7 @@ func TestClientDivergentTraces1(t *testing.T) {
mockWitness := mockNodeFromHeadersAndVals(headers, vals)
mockWitness.On("ID").Return("mockWitness")
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
_, err = light.NewClient(
ctx,
@ -437,7 +437,7 @@ func TestClientDivergentTraces1(t *testing.T) {
func TestClientDivergentTraces2(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
headers, vals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime)
mockPrimaryNode := mockNodeFromHeadersAndVals(headers, vals)
@ -482,7 +482,7 @@ func TestClientDivergentTraces2(t *testing.T) {
// => creation should succeed, but the verification should fail
//nolint: dupl
func TestClientDivergentTraces3(t *testing.T) {
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
//
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime)
@ -527,7 +527,7 @@ func TestClientDivergentTraces3(t *testing.T) {
// It should be ignored
//nolint: dupl
func TestClientDivergentTraces4(t *testing.T) {
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
//
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime)


light/light_test.go (+3, -3)

@ -30,7 +30,7 @@ func TestClientIntegration_Update(t *testing.T) {
conf, err := rpctest.CreateConfig(t, t.Name())
require.NoError(t, err)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// Start a test application
app := kvstore.NewApplication()
@ -90,7 +90,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
conf, err := rpctest.CreateConfig(t, t.Name())
require.NoError(t, err)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
// Start a test application
app := kvstore.NewApplication()
@ -201,7 +201,7 @@ func TestClientStatusRPC(t *testing.T) {
primary,
witnesses,
dbs.New(db),
light.Logger(log.TestingLogger()),
light.Logger(log.NewNopLogger()),
)
require.NoError(t, err)

