
lint: add errchecks (#5316)

## Description

Work towards enabling the errcheck linter.

ref #5059
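For context, errcheck flags any call whose returned `error` is silently dropped. The changes in this diff fall into three recurring shapes, sketched below with illustrative helper names that are not taken from the diff itself: check-and-log in shutdown paths, `t.Cleanup`/`t.Error` in tests, and an explicit `//nolint:errcheck` where the error genuinely cannot occur.

```go
package example

import (
	"crypto/sha256"
	"fmt"
	"testing"
)

// stopAndLog shows the shutdown-path pattern: there is no caller to return
// the error to, so it is checked and logged instead of being dropped.
func stopAndLog(stop func() error, logErr func(msg string, keyvals ...interface{})) {
	if err := stop(); err != nil {
		logErr("error while stopping service", "err", err)
	}
}

// cleanupStop shows the test pattern: register the stop call with t.Cleanup
// and report any error through the testing.T rather than ignoring it.
func cleanupStop(t *testing.T, stop func() error) {
	t.Cleanup(func() {
		if err := stop(); err != nil {
			t.Error(err)
		}
	})
}

// checksum shows the intentional-ignore pattern: sha256's Write never fails,
// so the call is annotated for the linter instead of being checked.
func checksum(data []byte) string {
	h := sha256.New()
	h.Write(data) //nolint:errcheck // hash.Hash.Write never returns an error
	return fmt.Sprintf("%x", h.Sum(nil))
}
```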
Marko committed 4 years ago (via GitHub) · commit b8d08b9ef4
43 changed files with 420 additions and 143 deletions
1. abci/client/grpc_client.go (+3, -1)
2. abci/client/socket_client.go (+6, -2)
3. abci/tests/test_app/main.go (+14, -3)
4. behaviour/reporter_test.go (+6, -2)
5. cmd/tendermint/commands/debug/io.go (+1, -2)
6. cmd/tendermint/commands/reset_priv_validator.go (+3, -1)
7. cmd/tendermint/commands/run_node.go (+3, -1)
8. consensus/byzantine_test.go (+6, -3)
9. consensus/common_test.go (+4, -1)
10. consensus/invalid_test.go (+4, -1)
11. consensus/reactor_test.go (+10, -4)
12. consensus/replay_test.go (+29, -7)
13. consensus/state.go (+9, -3)
14. consensus/state_test.go (+6, -2)
15. consensus/wal.go (+9, -3)
16. consensus/wal_generator.go (+20, -5)
17. consensus/wal_test.go (+15, -5)
18. crypto/hash.go (+1, -1)
19. crypto/merkle/proof_value.go (+3, -3)
20. crypto/xchacha20poly1305/xchachapoly_test.go (+16, -4)
21. mempool/bench_test.go (+6, -2)
22. mempool/cache_test.go (+4, -2)
23. mempool/clist_mempool_test.go (+32, -15)
24. mempool/reactor_test.go (+15, -5)
25. node/node.go (+12, -4)
26. p2p/peer_test.go (+12, -4)
27. p2p/switch.go (+6, -2)
28. p2p/switch_test.go (+26, -14)
29. p2p/trust/store.go (+12, -4)
30. privval/signer_listener_endpoint_test.go (+18, -4)
31. proxy/multi_app_conn.go (+12, -4)
32. proxy/multi_app_conn_test.go (+7, -2)
33. rpc/client/rpc_test.go (+4, -2)
34. rpc/jsonrpc/client/ws_client.go (+4, -1)
35. rpc/jsonrpc/server/ws_handler.go (+3, -1)
36. state/txindex/indexer_service_test.go (+16, -5)
37. types/block.go (+3, -1)
38. types/block_test.go (+2, -2)
39. types/event_bus.go (+3, -1)
40. types/event_bus_test.go (+43, -9)
41. types/genesis_test.go (+2, -1)
42. types/params.go (+4, -1)
43. types/vote_set_test.go (+6, -3)

abci/client/grpc_client.go (+3, -1)

@@ -99,7 +99,9 @@ func (cli *grpcClient) StopForError(err error) {
 	cli.mtx.Unlock()
 	cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
-	cli.Stop()
+	if err := cli.Stop(); err != nil {
+		cli.Logger.Error("Error stopping abci.grpcClient", "err", err)
+	}
 }

 func (cli *grpcClient) Error() error {


abci/client/socket_client.go (+6, -2)

@@ -413,7 +413,9 @@ func (cli *socketClient) LoadSnapshotChunkSync(
 func (cli *socketClient) ApplySnapshotChunkSync(
 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
 	reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
-	cli.FlushSync()
+	if err := cli.FlushSync(); err != nil {
+		return nil, err
+	}
 	return reqres.Response.GetApplySnapshotChunk(), cli.Error()
 }

@@ -500,5 +502,7 @@ func (cli *socketClient) stopForError(err error) {
 	cli.mtx.Unlock()
 	cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
-	cli.Stop()
+	if err := cli.Stop(); err != nil {
+		cli.Logger.Error("Error stopping abci.socketClient", "err", err)
+	}
 }

abci/tests/test_app/main.go (+14, -3)

@@ -59,15 +59,26 @@ func testCounter() {
 	if err := cmd.Start(); err != nil {
 		log.Fatalf("starting %q err: %v", abciApp, err)
 	}
-	defer cmd.Wait()
-	defer cmd.Process.Kill()
+	defer func() {
+		if err := cmd.Wait(); err != nil {
+			log.Printf("error while waiting for cmd to exit: %v", err)
+		}
+		if err := cmd.Process.Kill(); err != nil {
+			log.Printf("error on process kill: %v", err)
+		}
+	}()

 	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
 		log.Fatalf("echo failed: %v", err)
 	}

 	client := startClient(abciType)
-	defer client.Stop()
+	defer func() {
+		if err := client.Stop(); err != nil {
+			log.Printf("error trying client stop: %v", err)
+		}
+	}()

 	setOption(client, "serial", "on")
 	commit(client, nil)


behaviour/reporter_test.go (+6, -2)

@@ -20,7 +20,9 @@ func TestMockReporter(t *testing.T) {
 	}

 	badMessage := bh.BadMessage(peerID, "bad message")
-	pr.Report(badMessage)
+	if err := pr.Report(badMessage); err != nil {
+		t.Error(err)
+	}
 	behaviours = pr.GetBehaviours(peerID)
 	if len(behaviours) != 1 {
 		t.Error("Expected the peer have one reported behaviour")

@@ -164,7 +166,9 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
 			for {
 				select {
 				case pb := <-scriptItems:
-					pr.Report(pb.behaviour)
+					if err := pr.Report(pb.behaviour); err != nil {
+						t.Error(err)
+					}
 				case <-done:
 					return
 				}


cmd/tendermint/commands/debug/io.go (+1, -2)

@@ -28,7 +28,7 @@ func zipDir(src, dest string) error {
 	dirName := filepath.Base(dest)
 	baseDir := strings.TrimSuffix(dirName, filepath.Ext(dirName))

-	filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
+	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}

@@ -69,7 +69,6 @@ func zipDir(src, dest string) error {
 		return err
 	})
-	return nil
 }

 // copyFile copies a file from src to dest and returns an error upon failure. The


cmd/tendermint/commands/reset_priv_validator.go (+3, -1)

@@ -58,7 +58,9 @@ func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg
 		logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
 	}
 	// recreate the dbDir since the privVal state needs to live there
-	tmos.EnsureDir(dbDir, 0700)
+	if err := tmos.EnsureDir(dbDir, 0700); err != nil {
+		logger.Error("unable to recreate dbDir", "err", err)
+	}
 	resetFilePV(privValKeyFile, privValStateFile, logger)
 }


cmd/tendermint/commands/run_node.go (+3, -1)

@@ -120,7 +120,9 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
 			// Stop upon receiving SIGTERM or CTRL-C.
 			tmos.TrapSignal(logger, func() {
 				if n.IsRunning() {
-					n.Stop()
+					if err := n.Stop(); err != nil {
+						logger.Error("unable to stop the node", "error", err)
+					}
 				}
 			})


consensus/byzantine_test.go (+6, -3)

@@ -84,7 +84,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		eventBus := types.NewEventBus()
 		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-		eventBus.Start()
+		err = eventBus.Start()
+		require.NoError(t, err)
 		cs.SetEventBus(eventBus)
 		cs.SetTimeoutTicker(tickerFunc())

@@ -254,9 +255,11 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
 	defer func() {
 		for _, r := range reactors {
 			if rr, ok := r.(*ByzantineReactor); ok {
-				rr.reactor.Switch.Stop()
+				err := rr.reactor.Switch.Stop()
+				require.NoError(t, err)
 			} else {
-				r.(*Reactor).Switch.Stop()
+				err := r.(*Reactor).Switch.Stop()
+				require.NoError(t, err)
 			}
 		}
 	}()


consensus/common_test.go (+4, -1)

@@ -398,7 +398,10 @@ func newStateWithConfigAndBlockStore(
 	eventBus := types.NewEventBus()
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-	eventBus.Start()
+	err := eventBus.Start()
+	if err != nil {
+		panic(err)
+	}
 	cs.SetEventBus(eventBus)
 	return cs
 }


consensus/invalid_test.go (+4, -1)

@@ -83,7 +83,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
 			PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}},
 	}
 	p := precommit.ToProto()
-	cs.privValidator.SignVote(cs.state.ChainID, p)
+	err = cs.privValidator.SignVote(cs.state.ChainID, p)
+	if err != nil {
+		t.Error(err)
+	}
 	precommit.Signature = p.Signature
 	cs.privValidator = nil // disable priv val so we don't do normal votes
 	cs.mtx.Unlock()


consensus/reactor_test.go (+10, -4)

@@ -89,11 +89,15 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
 	logger.Info("stopConsensusNet", "n", len(reactors))
 	for i, r := range reactors {
 		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
-		r.Switch.Stop()
+		if err := r.Switch.Stop(); err != nil {
+			logger.Error("error trying to stop switch", "error", err)
+		}
 	}
 	for i, b := range eventBuses {
 		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
-		b.Stop()
+		if err := b.Stop(); err != nil {
+			logger.Error("error trying to stop eventbus", "error", err)
+		}
 	}
 	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
 }

@@ -167,7 +171,8 @@ func TestReactorWithEvidence(t *testing.T) {
 		eventBus := types.NewEventBus()
 		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-		eventBus.Start()
+		err := eventBus.Start()
+		require.NoError(t, err)
 		cs.SetEventBus(eventBus)
 		cs.SetTimeoutTicker(tickerFunc())

@@ -670,7 +675,8 @@ func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
 			t.Log("")
 		}
 		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
-		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+		require.NoError(t, err)
 		capture()
 		panic("Timed out waiting for all validators to commit a block")
 	}


consensus/replay_test.go (+29, -7)

@@ -107,7 +107,9 @@ func sendTxs(ctx context.Context, cs *State) {
 			return
 		default:
 			tx := []byte{byte(i)}
-			assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
+			if err := assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}); err != nil {
+				panic(err)
+			}
 			i++
 		}
 	}

@@ -196,7 +198,7 @@ LOOP:
 		startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

 		// stop consensus state and transactions sender (initFn)
-		cs.Stop() // Logging this error causes failure
+		cs.Stop() //nolint:errcheck // Logging this error causes failure
 		cancel()

 		// if we reached the required height, exit

@@ -683,7 +685,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
 		wal.SetLogger(log.TestingLogger())
 		err = wal.Start()
 		require.NoError(t, err)
-		defer wal.Stop()
+		t.Cleanup(func() {
+			if err := wal.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
 		chain, commits, err = makeBlockchainFromWAL(wal)
 		require.NoError(t, err)
 		pubKey, err := privVal.GetPubKey()

@@ -730,7 +736,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	err := handshaker.Handshake(proxyApp)
 	if expectError {

@@ -896,7 +906,11 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	proxyApp := proxy.NewAppConns(clientCreator)
 	err := proxyApp.Start()
 	require.NoError(t, err)
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.Panics(t, func() {
 		h := NewHandshaker(stateDB, state, store, genDoc)

@@ -916,7 +930,11 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	proxyApp := proxy.NewAppConns(clientCreator)
 	err := proxyApp.Start()
 	require.NoError(t, err)
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.Panics(t, func() {
 		h := NewHandshaker(stateDB, state, store, genDoc)

@@ -1211,7 +1229,11 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
 	if err := proxyApp.Start(); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	if err := handshaker.Handshake(proxyApp); err != nil {
 		t.Fatalf("Error on abci handshake: %v", err)
 	}


consensus/state.go (+9, -3)

@@ -405,8 +405,12 @@ func (cs *State) loadWalFile() error {

 // OnStop implements service.Service.
 func (cs *State) OnStop() {
-	cs.evsw.Stop()
-	cs.timeoutTicker.Stop()
+	if err := cs.evsw.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop eventSwitch", "error", err)
+	}
+	if err := cs.timeoutTicker.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop timeoutTicket", "error", err)
+	}
 	// WAL is stopped in receiveRoutine.
 }

@@ -678,7 +682,9 @@ func (cs *State) receiveRoutine(maxSteps int) {
 			// priv_val tracks LastSig

 			// close wal now that we're done writing to it
-			cs.wal.Stop()
+			if err := cs.wal.Stop(); err != nil {
+				cs.Logger.Error("error trying to stop wal", "error", err)
+			}
 			cs.wal.Wait()

 			close(cs.done)


consensus/state_test.go (+6, -2)

@@ -248,11 +248,15 @@ func TestStateFullRound1(t *testing.T) {
 	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
 	// before consensus can move to the next height (and cause a race condition)
-	cs.eventBus.Stop()
+	if err := cs.eventBus.Stop(); err != nil {
+		t.Error(err)
+	}
 	eventBus := types.NewEventBusWithBufferCapacity(0)
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
 	cs.SetEventBus(eventBus)
-	eventBus.Start()
+	if err := eventBus.Start(); err != nil {
+		t.Error(err)
+	}

 	voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
 	propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)


consensus/wal.go (+9, -3)

@@ -126,7 +126,9 @@ func (wal *BaseWAL) OnStart() error {
 	if err != nil {
 		return err
 	} else if size == 0 {
-		wal.WriteSync(EndHeightMessage{0})
+		if err := wal.WriteSync(EndHeightMessage{0}); err != nil {
+			return err
+		}
 	}
 	err = wal.group.Start()
 	if err != nil {

@@ -161,8 +163,12 @@ func (wal *BaseWAL) FlushAndSync() error {
 // before cleaning up files.
 func (wal *BaseWAL) OnStop() {
 	wal.flushTicker.Stop()
-	wal.FlushAndSync()
-	wal.group.Stop()
+	if err := wal.FlushAndSync(); err != nil {
+		wal.Logger.Error("error on flush data to disk", "error", err)
+	}
+	if err := wal.group.Stop(); err != nil {
+		wal.Logger.Error("error trying to stop wal", "error", err)
+	}
 	wal.group.Close()
 }


consensus/wal_generator.go (+20, -5)

@@ -61,14 +61,22 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
 	if err := proxyApp.Start(); err != nil {
 		return fmt.Errorf("failed to start proxy app connections: %w", err)
 	}
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	eventBus := types.NewEventBus()
 	eventBus.SetLogger(logger.With("module", "events"))
 	if err := eventBus.Start(); err != nil {
 		return fmt.Errorf("failed to start event bus: %w", err)
 	}
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	mempool := emptyMempool{}
 	evpool := emptyEvidencePool{}
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

@@ -85,7 +93,10 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
 	numBlocksWritten := make(chan struct{})
 	wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
 	// see wal.go#103
-	wal.Write(EndHeightMessage{0})
+	if err := wal.Write(EndHeightMessage{0}); err != nil {
+		t.Error(err)
+	}
 	consensusState.wal = wal

 	if err := consensusState.Start(); err != nil {

@@ -94,10 +105,14 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
 	select {
 	case <-numBlocksWritten:
-		consensusState.Stop()
+		if err := consensusState.Stop(); err != nil {
+			t.Error(err)
+		}
 		return nil
 	case <-time.After(1 * time.Minute):
-		consensusState.Stop()
+		if err := consensusState.Stop(); err != nil {
+			t.Error(err)
+		}
 		return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
 	}
 }


consensus/wal_test.go (+15, -5)

@@ -46,7 +46,9 @@ func TestWALTruncate(t *testing.T) {
 	err = wal.Start()
 	require.NoError(t, err)
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		// wait for the wal to finish shutting down so we
 		// can safely remove the directory
 		wal.Wait()

@@ -60,7 +62,9 @@ func TestWALTruncate(t *testing.T) {
 	time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run

-	wal.FlushAndSync()
+	if err := wal.FlushAndSync(); err != nil {
+		t.Error(err)
+	}

 	h := int64(50)
 	gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})

@@ -115,7 +119,9 @@ func TestWALWrite(t *testing.T) {
 	err = wal.Start()
 	require.NoError(t, err)
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		// wait for the wal to finish shutting down so we
 		// can safely remove the directory
 		wal.Wait()

@@ -191,7 +197,9 @@ func TestWALPeriodicSync(t *testing.T) {
 	require.NoError(t, wal.Start())
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		wal.Wait()
 	}()

@@ -236,7 +244,9 @@ func benchmarkWalDecode(b *testing.B, n int) {
 	enc := NewWALEncoder(buf)

 	data := nBytes(n)
-	enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()})
+	if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil {
+		b.Error(err)
+	}

 	encoded := buf.Bytes()


crypto/hash.go (+1, -1)

@@ -6,6 +6,6 @@ import (
 func Sha256(bytes []byte) []byte {
 	hasher := sha256.New()
-	hasher.Write(bytes)
+	hasher.Write(bytes) //nolint:errcheck // ignore error
 	return hasher.Sum(nil)
 }

crypto/merkle/proof_value.go (+3, -3)

@@ -80,13 +80,13 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
 	}
 	value := args[0]
 	hasher := tmhash.New()
-	hasher.Write(value) // does not error
+	hasher.Write(value) //nolint: errcheck // does not error
 	vhash := hasher.Sum(nil)

 	bz := new(bytes.Buffer)
 	// Wrap <op.Key, vhash> to hash the KVPair.
-	encodeByteSlice(bz, op.key) // does not error
-	encodeByteSlice(bz, vhash) // does not error
+	encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
+	encodeByteSlice(bz, vhash) //nolint: errcheck // does not error
 	kvhash := leafHash(bz.Bytes())

 	if !bytes.Equal(kvhash, op.Proof.LeafHash) {


crypto/xchacha20poly1305/xchachapoly_test.go (+16, -4)

@@ -23,10 +23,22 @@ func TestRandom(t *testing.T) {
 		pl := mr.Intn(16384)
 		ad := make([]byte, al)
 		plaintext := make([]byte, pl)
-		cr.Read(key[:])
-		cr.Read(nonce[:])
-		cr.Read(ad)
-		cr.Read(plaintext)
+		_, err := cr.Read(key[:])
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(nonce[:])
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(ad)
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(plaintext)
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}

 		aead, err := New(key[:])
 		if err != nil {


mempool/bench_test.go (+6, -2)

@@ -18,7 +18,9 @@ func BenchmarkReap(b *testing.B) {
 	for i := 0; i < size; i++ {
 		tx := make([]byte, 8)
 		binary.BigEndian.PutUint64(tx, uint64(i))
-		mempool.CheckTx(tx, nil, TxInfo{})
+		if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
+			b.Error(err)
+		}
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {

@@ -35,7 +37,9 @@ func BenchmarkCheckTx(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		tx := make([]byte, 8)
 		binary.BigEndian.PutUint64(tx, uint64(i))
-		mempool.CheckTx(tx, nil, TxInfo{})
+		if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
+			b.Error(err)
+		}
 	}
 }


mempool/cache_test.go (+4, -2)

@@ -20,7 +20,8 @@ func TestCacheRemove(t *testing.T) {
 	for i := 0; i < numTxs; i++ {
 		// probability of collision is 2**-256
 		txBytes := make([]byte, 32)
-		rand.Read(txBytes)
+		_, err := rand.Read(txBytes)
+		require.NoError(t, err)
 		txs[i] = txBytes
 		cache.Push(txBytes)
 		// make sure its added to both the linked list and the map

@@ -67,7 +68,8 @@ func TestCacheAfterUpdate(t *testing.T) {
 			tx := types.Tx{byte(v)}
 			updateTxs = append(updateTxs, tx)
 		}
-		mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
+		err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
+		require.NoError(t, err)

 		for _, v := range tc.reAddIndices {
 			tx := types.Tx{byte(v)}


mempool/clist_mempool_test.go (+32, -15)

@@ -170,7 +170,8 @@ func TestMempoolFilters(t *testing.T) {
 		{10, PreCheckMaxBytes(20), PostCheckMaxGas(0), 0},
 	}
 	for tcIndex, tt := range tests {
-		mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
+		err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
+		require.NoError(t, err)
 		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
 		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
 		mempool.Flush()

@@ -185,8 +186,9 @@ func TestMempoolUpdate(t *testing.T) {
 	// 1. Adds valid txs to the cache
 	{
-		mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
-		err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
+		err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		require.NoError(t, err)
+		err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
 		if assert.Error(t, err) {
 			assert.Equal(t, ErrTxInCache, err)
 		}

@@ -196,7 +198,8 @@ func TestMempoolUpdate(t *testing.T) {
 	{
 		err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{})
 		require.NoError(t, err)
-		mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		require.NoError(t, err)
 		assert.Zero(t, mempool.Size())
 	}

@@ -204,11 +207,12 @@ func TestMempoolUpdate(t *testing.T) {
 	{
 		err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
 		require.NoError(t, err)
-		mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
+		err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
+		require.NoError(t, err)
 		assert.Zero(t, mempool.Size())

 		err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
-		assert.NoError(t, err)
+		require.NoError(t, err)
 	}
 }

@@ -385,7 +389,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
 	defer cleanup()
 	mempool.height = 10
-	mempool.InitWAL()
+	err = mempool.InitWAL()
+	require.NoError(t, err)

 	// 4. Ensure that the directory contains the WAL file
 	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))

@@ -393,7 +398,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	require.Equal(t, 1, len(m2), "expecting the wal match in")

 	// 5. Write some contents to the WAL
-	mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
+	err = mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
+	require.NoError(t, err)
 	walFilepath := mempool.wal.Path
 	sum1 := checksumFile(walFilepath, t)

@@ -403,7 +409,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	// 7. Invoke CloseWAL() and ensure it discards the
 	// WAL thus any other write won't go through.
 	mempool.CloseWAL()
-	mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
+	err = mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
+	require.NoError(t, err)
 	sum2 := checksumFile(walFilepath, t)
 	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

@@ -481,7 +488,8 @@ func TestMempoolTxsBytes(t *testing.T) {
 	assert.EqualValues(t, 1, mempool.TxsBytes())

 	// 3. zero again after tx is removed by Update
-	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+	err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+	require.NoError(t, err)
 	assert.EqualValues(t, 0, mempool.TxsBytes())

 	// 4. zero after Flush

@@ -517,7 +525,11 @@ func TestMempoolTxsBytes(t *testing.T) {
 	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
 	err = appConnCon.Start()
 	require.Nil(t, err)
-	defer appConnCon.Stop()
+	t.Cleanup(func() {
+		if err := appConnCon.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Code)

@@ -526,7 +538,8 @@ func TestMempoolTxsBytes(t *testing.T) {
 	require.NotEmpty(t, res2.Data)

 	// Pretend like we committed nothing so txBytes gets rechecked and removed.
-	mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
+	err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
+	require.NoError(t, err)
 	assert.EqualValues(t, 0, mempool.TxsBytes())

 	// 7. Test RemoveTxByKey function

@@ -548,7 +561,11 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
 	app := kvstore.NewApplication()
 	cc, server := newRemoteApp(t, sockPath, app)
-	defer server.Stop()
+	t.Cleanup(func() {
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	config := cfg.ResetTestRoot("mempool_test")
 	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
 	defer cleanup()

@@ -570,7 +587,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 		tx := txs[txNum]

 		// this will err with ErrTxInCache many times ...
-		mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)})
+		mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
 	}
 	err := mempool.FlushAppConn()
 	require.NoError(t, err)

@@ -597,7 +614,7 @@ func newRemoteApp(
 }
 func checksumIt(data []byte) string {
 	h := sha256.New()
-	h.Write(data)
+	h.Write(data) //nolint: errcheck // ignore errcheck
 	return fmt.Sprintf("%x", h.Sum(nil))
 }


mempool/reactor_test.go (+15, -5)

@@ -48,7 +48,9 @@ func TestReactorBroadcastTxMessage(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()
 	for _, r := range reactors {

@@ -69,7 +71,9 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()

@@ -88,7 +92,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()

@@ -112,7 +118,9 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
 	// stop reactors
 	for _, r := range reactors {
-		r.Stop()
+		if err := r.Stop(); err != nil {
+			assert.NoError(t, err)
+		}
 	}

 	// check that we are not leaking any go-routines

@@ -159,7 +167,9 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()
 	reactor := reactors[0]


node/node.go (+12, -4)

@@ -911,11 +911,17 @@ func (n *Node) OnStop() {
 	n.Logger.Info("Stopping Node")

 	// first stop the non-reactor services
-	n.eventBus.Stop()
-	n.indexerService.Stop()
+	if err := n.eventBus.Stop(); err != nil {
+		n.Logger.Error("Error closing eventBus", "err", err)
+	}
+	if err := n.indexerService.Stop(); err != nil {
+		n.Logger.Error("Error closing indexerService", "err", err)
+	}

 	// now stop the reactors
-	n.sw.Stop()
+	if err := n.sw.Stop(); err != nil {
+		n.Logger.Error("Error closing switch", "err", err)
+	}

 	// stop mempool WAL
 	if n.config.Mempool.WalEnabled() {

@@ -937,7 +943,9 @@ func (n *Node) OnStop() {
 	}

 	if pvsc, ok := n.privValidator.(service.Service); ok {
-		pvsc.Stop()
+		if err := pvsc.Stop(); err != nil {
+			n.Logger.Error("Error closing private validator", "err", err)
+		}
 	}

 	if n.prometheusSrv != nil {


p2p/peer_test.go (+12, -4)

@@ -25,14 +25,18 @@ func TestPeerBasic(t *testing.T) {
 	// simulate remote peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
 	rp.Start()
-	defer rp.Stop()
+	t.Cleanup(rp.Stop)

 	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig())
 	require.Nil(err)

 	err = p.Start()
 	require.Nil(err)
-	defer p.Stop()
+	t.Cleanup(func() {
+		if err := p.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.True(p.IsRunning())
 	assert.True(p.IsOutbound())

@@ -51,7 +55,7 @@ func TestPeerSend(t *testing.T) {
 	// simulate remote peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config}
 	rp.Start()
-	defer rp.Stop()
+	t.Cleanup(rp.Stop)

 	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig())
 	require.Nil(err)

@@ -59,7 +63,11 @@ func TestPeerSend(t *testing.T) {
 	err = p.Start()
 	require.Nil(err)
-	defer p.Stop()
+	t.Cleanup(func() {
+		if err := p.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.True(p.CanSend(testCh))
 	assert.True(p.Send(testCh, []byte("Asylum")))


p2p/switch.go (+6, -2)

@@ -245,7 +245,9 @@ func (sw *Switch) OnStop() {
 	// Stop reactors
 	sw.Logger.Debug("Switch: Stopping reactors")
 	for _, reactor := range sw.reactors {
-		reactor.Stop()
+		if err := reactor.Stop(); err != nil {
+			sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err)
+		}
 	}
 }

@@ -349,7 +351,9 @@ func (sw *Switch) StopPeerGracefully(peer Peer) {
 func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 	sw.transport.Cleanup(peer)
-	peer.Stop()
+	if err := peer.Stop(); err != nil {
+		sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly
+	}

 	for _, reactor := range sw.reactors {
 		reactor.RemovePeer(peer, reason)


p2p/switch_test.go (+26, -14)

@@ -227,7 +227,8 @@ func TestSwitchPeerFilter(t *testing.T) {
 			SwitchPeerFilters(filters...),
 		)
 	)
-	sw.Start()
+	err := sw.Start()
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {
 			t.Error(err)

@@ -277,7 +278,8 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
 			SwitchPeerFilters(filters...),
 		)
 	)
-	sw.Start()
+	err := sw.Start()
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {
 			t.Log(err)

@@ -384,7 +386,8 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 	require.NotNil(sw.Peers().Get(rp.ID()))

 	// simulate failure by closing connection
-	p.(*peer).CloseConn()
+	err = p.(*peer).CloseConn()
+	require.NoError(err)

 	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
 	assert.False(p.IsRunning())

@@ -396,7 +399,7 @@ func TestSwitchStopPeerForError(t *testing.T) {
 	scrapeMetrics := func() string {
 		resp, err := http.Get(s.URL)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		defer resp.Body.Close()
 		buf, _ := ioutil.ReadAll(resp.Body)
 		return string(buf)

@@ -467,7 +470,8 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
 	require.NotNil(t, sw.Peers().Get(rp.ID()))

 	p := sw.Peers().List()[0]
-	p.(*peer).CloseConn()
+	err = p.(*peer).CloseConn()
+	require.NoError(t, err)

 	waitUntilSwitchHasAtLeastNPeers(sw, 1)
 	assert.False(t, p.IsRunning()) // old peer instance

@@ -594,8 +598,9 @@ func TestSwitchAcceptRoutine(t *testing.T) {
 	// make switch
 	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
-	err := sw.Start()
+	err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
+	require.NoError(t, err)
+	err = sw.Start()
 	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {

@@ -635,7 +640,8 @@ func TestSwitchAcceptRoutine(t *testing.T) {
 	require.NoError(t, err)
 	// check conn is closed
 	one := make([]byte, 1)
-	conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+	err = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+	require.NoError(t, err)
 	_, err = conn.Read(one)
 	assert.Equal(t, io.EOF, err)
 	assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())

@@ -689,22 +695,24 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
 	sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
-		sw.Stop()
+		require.NoError(t, err)
+		err = sw.Stop()
+		require.NoError(t, err)
 	})

 	sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
-		sw.Stop()
+		require.NoError(t, err)
+		err = sw.Stop()
+		require.NoError(t, err)
 	})
 	// TODO(melekes) check we remove our address from addrBook

 	sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		err = sw.Stop()
 		require.NoError(t, err)
 	})

@@ -751,7 +759,11 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
 	})
 	err := sw.Start()
 	require.NoError(t, err)
-	defer sw.Stop()
+	t.Cleanup(func() {
+		if err := sw.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// add peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}


p2p/trust/store.go (+12, -4)

@@ -72,7 +72,9 @@ func (tms *MetricStore) OnStop() {
 	// Stop all trust metric go-routines
 	for _, tm := range tms.peerMetrics {
-		tm.Stop()
+		if err := tm.Stop(); err != nil {
+			tms.Logger.Error("unable to stop metric store", "error", err)
+		}
 	}

 	// Make the final trust history data save

@@ -108,7 +110,9 @@ func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric {
 	if !ok {
 		// If the metric is not available, we will create it
 		tm = NewMetricWithConfig(tms.config)
-		tm.Start()
+		if err := tm.Start(); err != nil {
+			tms.Logger.Error("unable to start metric store", "error", err)
+		}
 		// The metric needs to be in the map
 		tms.peerMetrics[key] = tm
 	}

@@ -168,7 +172,9 @@ func (tms *MetricStore) loadFromDB() bool {
 	for key, p := range peers {
 		tm := NewMetricWithConfig(tms.config)
-		tm.Start()
+		if err := tm.Start(); err != nil {
+			tms.Logger.Error("unable to start metric", "error", err)
+		}
 		tm.Init(p)
 		// Load the peer trust metric into the store
 		tms.peerMetrics[key] = tm

@@ -193,7 +199,9 @@ func (tms *MetricStore) saveToDB() {
 		tms.Logger.Error("Failed to encode the TrustHistory", "err", err)
 		return
 	}
-	tms.db.SetSync(trustMetricKey, bytes)
+	if err := tms.db.SetSync(trustMetricKey, bytes); err != nil {
+		tms.Logger.Error("failed to flush data to disk", "error", err)
+	}
 }

 // Periodically saves the trust history data to the DB


privval/signer_listener_endpoint_test.go (+18, -4)

@@ -73,7 +73,11 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) {
 		err = signerServer.Start()
 		require.NoError(t, err)
-		defer signerServer.Stop()
+		t.Cleanup(func() {
+			if err := signerServer.Stop(); err != nil {
+				t.Error(err)
+			}
+		})

 		select {
 		case attempts := <-attemptCh:

@@ -104,12 +108,18 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
 		signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV)

 		startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh)
-		defer listenerEndpoint.Stop()
+		t.Cleanup(func() {
+			if err := listenerEndpoint.Stop(); err != nil {
+				t.Error(err)
+			}
+		})

 		require.NoError(t, signerServer.Start())
 		assert.True(t, signerServer.IsRunning())
 		<-endpointIsOpenCh
-		signerServer.Stop()
+		if err := signerServer.Stop(); err != nil {
+			t.Error(err)
+		}

 		dialerEndpoint2 := NewSignerDialerEndpoint(
 			logger,

@@ -120,7 +130,11 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
 		// let some pings pass
 		require.NoError(t, signerServer2.Start())
 		assert.True(t, signerServer2.IsRunning())
-		defer signerServer2.Stop()
+		t.Cleanup(func() {
+			if err := signerServer2.Stop(); err != nil {
+				t.Error(err)
+			}
+		})

 		// give the client some time to re-establish the conn to the remote signer
 		// should see sth like this in the logs:


proxy/multi_app_conn.go (+12, -4)

@@ -157,16 +157,24 @@ func (app *multiAppConn) killTMOnClientError() {
 func (app *multiAppConn) stopAllClients() {
 	if app.consensusConnClient != nil {
-		app.consensusConnClient.Stop()
+		if err := app.consensusConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping consensus client", "error", err)
+		}
 	}
 	if app.mempoolConnClient != nil {
-		app.mempoolConnClient.Stop()
+		if err := app.mempoolConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping mempool client", "error", err)
+		}
 	}
 	if app.queryConnClient != nil {
-		app.queryConnClient.Stop()
+		if err := app.queryConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping query client", "error", err)
+		}
 	}
 	if app.snapshotConnClient != nil {
-		app.snapshotConnClient.Stop()
+		if err := app.snapshotConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping snapshot client", "error", err)
+		}
 	}
 }


proxy/multi_app_conn_test.go (+7, -2)

@@ -35,7 +35,8 @@ func TestAppConns_Start_Stop(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)

-	appConns.Stop()
+	err = appConns.Stop()
+	require.NoError(t, err)

 	clientMock.AssertExpectations(t)
 }

@@ -71,7 +72,11 @@ func TestAppConns_Failure(t *testing.T) {
 	err := appConns.Start()
 	require.NoError(t, err)
-	defer appConns.Stop()
+	t.Cleanup(func() {
+		if err := appConns.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// simulate failure
 	close(quitCh)


rpc/client/rpc_test.go (+4, -2)

@@ -190,7 +190,8 @@ func TestABCIQuery(t *testing.T) {
 	apph := bres.Height + 1 // this is where the tx will be applied to the state

 	// wait before querying
-	client.WaitForHeight(c, apph, nil)
+	err = client.WaitForHeight(c, apph, nil)
+	require.NoError(t, err)
 	res, err := c.ABCIQuery("/key", k)
 	qres := res.Response
 	if assert.Nil(t, err) && assert.True(t, qres.IsOK()) {

@@ -624,7 +625,8 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 	require.Equal(t, *bresult2, *r2)
 	apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1

-	client.WaitForHeight(c, apph, nil)
+	err = client.WaitForHeight(c, apph, nil)
+	require.NoError(t, err)

 	q1, err := batch.ABCIQuery("/key", k1)
 	require.NoError(t, err)


rpc/jsonrpc/client/ws_client.go (+4, -1)

@@ -349,7 +349,10 @@ func (c *WSClient) reconnectRoutine() {
 			c.wg.Wait()
 			if err := c.reconnect(); err != nil {
 				c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
-				c.Stop()
+				if err = c.Stop(); err != nil {
+					c.Logger.Error("failed to stop conn", "error", err)
+				}
 				return
 			}
 			// drain reconnectAfter


rpc/jsonrpc/server/ws_handler.go (+3, -1)

@@ -99,7 +99,9 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
 		wm.logger.Error("Failed to start connection", "err", err)
 		return
 	}
-	con.Stop()
+	if err := con.Stop(); err != nil {
+		wm.logger.Error("error while stopping connection", "error", err)
+	}
 }

 ///////////////////////////////////////////////////////////////////////////////


state/txindex/indexer_service_test.go (+16, -5)

@@ -22,7 +22,11 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 	eventBus.SetLogger(log.TestingLogger())
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// tx indexer
 	store := db.NewMemDB()

@@ -32,27 +36,34 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 	service.SetLogger(log.TestingLogger())
 	err = service.Start()
 	require.NoError(t, err)
-	defer service.Stop()
+	t.Cleanup(func() {
+		if err := service.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// publish block with txs
-	eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
+	err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
 		Header: types.Header{Height: 1},
 		NumTxs: int64(2),
 	})
+	require.NoError(t, err)
 	txResult1 := &abci.TxResult{
 		Height: 1,
 		Index:  uint32(0),
 		Tx:     types.Tx("foo"),
 		Result: abci.ResponseDeliverTx{Code: 0},
 	}
-	eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
+	err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
+	require.NoError(t, err)
 	txResult2 := &abci.TxResult{
 		Height: 1,
 		Index:  uint32(1),
 		Tx:     types.Tx("bar"),
 		Result: abci.ResponseDeliverTx{Code: 0},
 	}
-	eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
+	err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
+	require.NoError(t, err)

 	time.Sleep(100 * time.Millisecond)


types/block.go (+3, -1)

@@ -252,7 +252,9 @@ func BlockFromProto(bp *tmproto.Block) (*Block, error) {
 		return nil, err
 	}
 	b.Data = data
-	b.Evidence.FromProto(&bp.Evidence)
+	if err := b.Evidence.FromProto(&bp.Evidence); err != nil {
+		return nil, err
+	}

 	if bp.LastCommit != nil {
 		lc, err := CommitFromProto(bp.LastCommit)


types/block_test.go (+2, -2)

@@ -173,8 +173,8 @@ func makeBlockIDRandom() BlockID {
 		blockHash   = make([]byte, tmhash.Size)
 		partSetHash = make([]byte, tmhash.Size)
 	)
-	rand.Read(blockHash)
-	rand.Read(partSetHash)
+	rand.Read(blockHash)   //nolint: errcheck // ignore errcheck for read
+	rand.Read(partSetHash) //nolint: errcheck // ignore errcheck for read
 	return BlockID{blockHash, PartSetHeader{123, partSetHash}}
 }


types/event_bus.go (+3, -1)

@@ -59,7 +59,9 @@ func (b *EventBus) OnStart() error {
 }

 func (b *EventBus) OnStop() {
-	b.pubsub.Stop()
+	if err := b.pubsub.Stop(); err != nil {
+		b.pubsub.Logger.Error("error trying to stop eventBus", "error", err)
+	}
 }

 func (b *EventBus) NumClients() int {


types/event_bus_test.go (+43, -9)

@@ -20,7 +20,11 @@ func TestEventBusPublishEventTx(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	tx := Tx("foo")
 	result := abci.ResponseDeliverTx{

@@ -65,7 +69,11 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
 	resultBeginBlock := abci.ResponseBeginBlock{

@@ -112,7 +120,11 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	tx := Tx("foo")
 	result := abci.ResponseDeliverTx{

@@ -216,7 +228,11 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
 	resultBeginBlock := abci.ResponseBeginBlock{

@@ -263,7 +279,11 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id")

@@ -297,7 +317,11 @@ func TestEventBusPublish(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	const numEventsExpected = 14

@@ -389,8 +413,15 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
 	rand.Seed(time.Now().Unix())
 	eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache
-	eventBus.Start()
-	defer eventBus.Stop()
+	err := eventBus.Start()
+	if err != nil {
+		b.Error(err)
+	}
+	b.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			b.Error(err)
+		}
+	})

 	ctx := context.Background()
 	q := EventQueryNewBlock

@@ -423,7 +454,10 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
 			eventType = randEvent()
 		}

-		eventBus.Publish(eventType, EventDataString("Gamora"))
+		err := eventBus.Publish(eventType, EventDataString("Gamora"))
+		if err != nil {
+			b.Error(err)
+		}
 	}
 }


types/genesis_test.go (+2, -1)

@@ -129,7 +129,8 @@ func TestGenesisSaveAs(t *testing.T) {
 	genDoc := randomGenesisDoc()

 	// save
-	genDoc.SaveAs(tmpfile.Name())
+	err = genDoc.SaveAs(tmpfile.Name())
+	require.NoError(t, err)

 	stat, err := tmpfile.Stat()
 	require.NoError(t, err)
 	if err != nil && stat.Size() <= 0 {


types/params.go (+4, -1)

@@ -150,7 +150,10 @@ func HashConsensusParams(params tmproto.ConsensusParams) []byte {
 		panic(err)
 	}

-	hasher.Write(bz)
+	_, err = hasher.Write(bz)
+	if err != nil {
+		panic(err)
+	}
 	return hasher.Sum(nil)
 }


types/vote_set_test.go (+6, -3)

@@ -307,7 +307,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// start tracking blockHash1
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val0 votes again for blockHash1.
 	{

@@ -318,7 +319,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// attempt tracking blockHash2, should fail because already set for peerA.
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	require.Error(t, err)

 	// val0 votes again for blockHash1.
 	{

@@ -369,7 +371,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// now attempt tracking blockHash1
-	voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val2 votes for blockHash1.
 	{

