diff --git a/.golangci.yml b/.golangci.yml
index 6adbbd9da..b07ec3a46 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -8,7 +8,6 @@ linters:
     - golint
     - maligned
     - errcheck
-    - staticcheck
    - interfacer
    - unconvert
    - goconst
diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go
index 8c444abc5..e326055fb 100644
--- a/abci/client/grpc_client.go
+++ b/abci/client/grpc_client.go
@@ -6,8 +6,8 @@ import (
 	"sync"
 	"time"

-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"

 	"github.com/tendermint/tendermint/abci/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -39,7 +39,7 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
 	return cli
 }

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 	return cmn.Connect(addr)
 }

@@ -49,7 +49,7 @@ func (cli *grpcClient) OnStart() error {
 	}
 RETRY_LOOP:
 	for {
-		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 		if err != nil {
 			if cli.mustConnect {
 				return err
@@ -65,7 +65,7 @@ RETRY_LOOP:

	ENSURE_CONNECTED:
		for {
-			_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
+			_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
			if err == nil {
				break ENSURE_CONNECTED
			}
@@ -125,7 +125,7 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {

 func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
 	req := types.ToRequestEcho(msg)
-	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
+	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -134,7 +134,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {

 func (cli *grpcClient) FlushAsync() *ReqRes {
 	req := types.ToRequestFlush()
-	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
+	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -143,7 +143,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {

 func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
 	req := types.ToRequestInfo(params)
-	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
+	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -152,7 +152,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {

 func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
 	req := types.ToRequestSetOption(params)
-	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
+	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -161,7 +161,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {

 func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
 	req := types.ToRequestDeliverTx(params)
-	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
+	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -170,7 +170,7 @@ func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {

 func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
 	req := types.ToRequestCheckTx(params)
-	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
+	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -179,7 +179,7 @@ func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {

 func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
 	req := types.ToRequestQuery(params)
-	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
+	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -188,7 +188,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {

 func (cli *grpcClient) CommitAsync() *ReqRes {
 	req := types.ToRequestCommit()
-	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
+	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -197,7 +197,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {

 func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
 	req := types.ToRequestInitChain(params)
-	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
+	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -206,7 +206,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {

 func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
 	req := types.ToRequestBeginBlock(params)
-	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
+	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -215,7 +215,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {

 func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
 	req := types.ToRequestEndBlock(params)
-	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
+	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
diff --git a/abci/example/example_test.go b/abci/example/example_test.go
index 6282f3a44..74510700b 100644
--- a/abci/example/example_test.go
+++ b/abci/example/example_test.go
@@ -107,7 +107,7 @@ func testStream(t *testing.T, app types.Application) {
 //-------------------------
 // test grpc

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 	return cmn.Connect(addr)
 }

@@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
 	defer server.Stop()

 	// Connect to the socket
-	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 	if err != nil {
 		t.Fatalf("Error dialing GRPC server: %v", err.Error())
 	}
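Note: grpc.WithDialer and grpc.FailFast are deprecated in grpc-go, which is what staticcheck's deprecation check (SA1019) reports; the changes above switch to grpc.WithContextDialer and grpc.WaitForReady. A minimal sketch of the new dialer shape, separate from this patch and using a placeholder address and timeout:

package main

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc"
)

// dialer has the func(context.Context, string) (net.Conn, error) shape that
// grpc.WithContextDialer expects; the context replaces the explicit timeout
// parameter that grpc.WithDialer used to pass in.
func dialer(ctx context.Context, addr string) (net.Conn, error) {
	d := net.Dialer{Timeout: 3 * time.Second} // illustrative timeout, not taken from this patch
	return d.DialContext(ctx, "tcp", addr)
}

func main() {
	// "localhost:26658" is a placeholder address, not one used by this patch.
	conn, err := grpc.Dial("localhost:26658", grpc.WithInsecure(), grpc.WithContextDialer(dialer))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}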
diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go
index 5d38471dc..574ef3f29 100644
--- a/blockchain/v0/reactor.go
+++ b/blockchain/v0/reactor.go
@@ -141,9 +141,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
 	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-	if !peer.Send(BlockchainChannel, msgBytes) {
-		// doing nothing, will try later in `poolRoutine`
-	}
+	peer.Send(BlockchainChannel, msgBytes)
+	// it's OK if send fails. will try later in poolRoutine
+
 	// peer is added to the pool once we receive the first
 	// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
 }
@@ -191,18 +191,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)

 	switch msg := msg.(type) {
 	case *bcBlockRequestMessage:
-		if queued := bcR.respondToPeer(msg, src); !queued {
-			// Unfortunately not queued since the queue is full.
-		}
+		bcR.respondToPeer(msg, src)
 	case *bcBlockResponseMessage:
 		bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
 	case *bcStatusRequestMessage:
 		// Send peer our state.
 		msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-		queued := src.TrySend(BlockchainChannel, msgBytes)
-		if !queued {
-			// sorry
-		}
+		src.TrySend(BlockchainChannel, msgBytes)
 	case *bcStatusResponseMessage:
 		// Got a peer status. Unverified.
 		bcR.pool.SetPeerHeight(src.ID(), msg.Height)
@@ -274,9 +269,10 @@ FOR_LOOP:
 			conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 			if ok {
 				conR.SwitchToConsensus(state, blocksSynced)
-			} else {
-				// should only happen during testing
 			}
+			// else {
+			// should only happen during testing
+			// }
 			break FOR_LOOP
 		}
diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go
index 2f95cebaf..480b87f34 100644
--- a/blockchain/v1/reactor.go
+++ b/blockchain/v1/reactor.go
@@ -169,9 +169,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
 	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-	if !peer.Send(BlockchainChannel, msgBytes) {
-		// doing nothing, will try later in `poolRoutine`
-	}
+	peer.Send(BlockchainChannel, msgBytes)
+	// it's OK if send fails. will try later in poolRoutine
+
 	// peer is added to the pool once we receive the first
 	// bcStatusResponseMessage from the peer and call pool.updatePeer()
 }
@@ -381,10 +381,11 @@ ForLoop:
 					err:    msg.data.err,
 				},
 			})
-			} else {
-				// For slow peers, or errors due to blocks received from wrong peer
-				// the FSM had already removed the peers
-			}
+			}
+			// else {
+			// For slow peers, or errors due to blocks received from wrong peer
+			// the FSM had already removed the peers
+			// }
 		default:
 			bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
 		}
@@ -465,9 +466,10 @@ func (bcR *BlockchainReactor) switchToConsensus() {
 	if ok {
 		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
 		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
-	} else {
-		// Should only happen during testing.
 	}
+	// else {
+	// Should only happen during testing.
+	// }
 }

 // Implements bcRNotifier
diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go
index de0179869..94f7340c2 100644
--- a/consensus/mempool_test.go
+++ b/consensus/mempool_test.go
@@ -155,12 +155,14 @@ func TestMempoolRmBadTx(t *testing.T) {
 		// and the tx should get removed from the pool
 		err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
 			if r.GetCheckTx().Code != code.CodeTypeBadNonce {
-				t.Fatalf("expected checktx to return bad nonce, got %v", r)
+				t.Errorf("expected checktx to return bad nonce, got %v", r)
+				return
 			}
 			checkTxRespCh <- struct{}{}
 		})
 		if err != nil {
-			t.Fatalf("Error after CheckTx: %v", err)
+			t.Errorf("Error after CheckTx: %v", err)
+			return
 		}

 		// check for the tx
@@ -180,7 +182,8 @@ func TestMempoolRmBadTx(t *testing.T) {
 	case <-checkTxRespCh:
 		// success
 	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to return")
+		t.Errorf("Timed out waiting for tx to return")
+		return
 	}

 	// Wait until the tx is removed
@@ -189,7 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
 	case <-emptyMempoolCh:
 		// success
 	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to be removed")
+		t.Errorf("Timed out waiting for tx to be removed")
+		return
 	}
 }
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go
index 612fde7f6..af6a62568 100644
--- a/consensus/reactor_test.go
+++ b/consensus/reactor_test.go
@@ -235,7 +235,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

 	// send a tx
 	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
-		//t.Fatal(err)
+		t.Error(err)
 	}

 	// wait till everyone makes the first new block
diff --git a/consensus/state.go b/consensus/state.go
index 1f6bad9ab..0a48b0525 100644
--- a/consensus/state.go
+++ b/consensus/state.go
@@ -690,13 +690,13 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 			cs.statsMsgQueue <- mi
 		}

-		if err == ErrAddingVote {
-			// TODO: punish peer
-			// We probably don't want to stop the peer here. The vote does not
-			// necessarily comes from a malicious peer but can be just broadcasted by
-			// a typical peer.
-			// https://github.com/tendermint/tendermint/issues/1281
-		}
+		// if err == ErrAddingVote {
+		// TODO: punish peer
+		// We probably don't want to stop the peer here. The vote does not
+		// necessarily comes from a malicious peer but can be just broadcasted by
+		// a typical peer.
+		// https://github.com/tendermint/tendermint/issues/1281
+		// }

 		// NOTE: the vote is broadcast to peers by the reactor listening
 		// for vote events
@@ -709,7 +709,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		return
 	}

-	if err != nil {
+	if err != nil { // nolint:staticcheck
 		// Causes TestReactorValidatorSetChanges to timeout
 		// https://github.com/tendermint/tendermint/issues/3406
 		// cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round,
@@ -1227,9 +1227,10 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
 			cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
 			cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
 			cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
-		} else {
-			// We just need to keep waiting.
 		}
+		// else {
+		// We just need to keep waiting.
+		// }
 	}
 }
diff --git a/consensus/state_test.go b/consensus/state_test.go
index 93ef0d4cb..1888e4057 100644
--- a/consensus/state_test.go
+++ b/consensus/state_test.go
@@ -621,8 +621,6 @@ func TestStateLockPOLUnlock(t *testing.T) {
 	// the proposed block should now be locked and our precommit added
 	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-	rs = cs1.GetRoundState()
-
 	// add precommits from the rest
 	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
 	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1317,8 +1315,6 @@ func TestStartNextHeightCorrectly(t *testing.T) {
 	// the proposed block should now be locked and our precommit added
 	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-	rs = cs1.GetRoundState()
-
 	// add precommits
 	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
 	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1370,8 +1366,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	ensurePrecommit(voteCh, height, round)
 	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-	rs = cs1.GetRoundState()
-
 	// add precommits
 	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
 	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
diff --git a/go.sum b/go.sum
index 23f548f0c..9766e4f70 100644
--- a/go.sum
+++ b/go.sum
@@ -84,6 +84,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4=
diff --git a/libs/common/async.go b/libs/common/async.go
index e3293ab4c..326b97248 100644
--- a/libs/common/async.go
+++ b/libs/common/async.go
@@ -61,9 +61,10 @@ func (trs *TaskResultSet) Reap() *TaskResultSet {
 					TaskResult: result,
 					OK:         true,
 				}
-			} else {
-				// We already wrote it.
 			}
+			// else {
+			// We already wrote it.
+			// }
 		default:
 			// Do nothing.
 		}
@@ -83,9 +84,10 @@ func (trs *TaskResultSet) Wait() *TaskResultSet {
 				TaskResult: result,
 				OK:         true,
 			}
-		} else {
-			// We already wrote it.
 		}
+		// else {
+		// We already wrote it.
+		// }
 	}
 	return trs
 }
diff --git a/libs/common/async_test.go b/libs/common/async_test.go
index f565b4bd3..c19ffc86f 100644
--- a/libs/common/async_test.go
+++ b/libs/common/async_test.go
@@ -40,9 +40,10 @@ func TestParallel(t *testing.T) {
 		} else if !assert.Equal(t, -1*i, taskResult.Value.(int)) {
 			assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int))
 			failedTasks++
-		} else {
-			// Good!
 		}
+		// else {
+		// Good!
+		// }
 	}
 	assert.Equal(t, failedTasks, 0, "No task should have failed")
 	assert.Nil(t, trs.FirstError(), "There should be no errors")
diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go
index d5f61dc07..5a2baa14f 100644
--- a/libs/pubsub/pubsub_test.go
+++ b/libs/pubsub/pubsub_test.go
@@ -273,11 +273,11 @@ func TestResubscribe(t *testing.T) {
 	defer s.Stop()

 	ctx := context.Background()
-	subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
+	_, err := s.Subscribe(ctx, clientID, query.Empty{})
 	require.NoError(t, err)
 	err = s.Unsubscribe(ctx, clientID, query.Empty{})
 	require.NoError(t, err)
-	subscription, err = s.Subscribe(ctx, clientID, query.Empty{})
+	subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
 	require.NoError(t, err)

 	err = s.Publish(ctx, "Cable")
diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go
index db2b6e46c..d92a486ea 100644
--- a/lite/proxy/query_test.go
+++ b/lite/proxy/query_test.go
@@ -143,13 +143,13 @@ func TestTxProofs(t *testing.T) {

 	// First let's make sure a bogus transaction hash returns a valid non-existence proof.
 	key := types.Tx([]byte("bogus")).Hash()
-	res, err := cl.Tx(key, true)
+	_, err = cl.Tx(key, true)
 	require.NotNil(err)
 	require.Contains(err.Error(), "not found")

 	// Now let's check with the real tx root hash.
 	key = types.Tx(tx).Hash()
-	res, err = cl.Tx(key, true)
+	res, err := cl.Tx(key, true)
 	require.NoError(err, "%#v", err)
 	require.NotNil(res)

 	keyHash := merkle.SimpleHashFromByteSlices([][]byte{key})
diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go
index 81123cb63..fc4591d29 100644
--- a/mempool/clist_mempool.go
+++ b/mempool/clist_mempool.go
@@ -250,11 +250,11 @@ func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), t
 		// so we only record the sender for txs still in the mempool.
 		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
 			memTx := e.(*clist.CElement).Value.(*mempoolTx)
-			if _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true); loaded {
-				// TODO: consider punishing peer for dups,
-				// its non-trivial since invalid txs can become valid,
-				// but they can spam the same tx with little cost to them atm.
-			}
+			memTx.senders.LoadOrStore(txInfo.SenderID, true)
+			// TODO: consider punishing peer for dups,
+			// its non-trivial since invalid txs can become valid,
+			// but they can spam the same tx with little cost to them atm.
+
 		}

 		return ErrTxInCache
diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go
index 283b00ebe..91e3e2099 100644
--- a/p2p/conn/connection_test.go
+++ b/p2p/conn/connection_test.go
@@ -57,7 +57,8 @@ func TestMConnectionSendFlushStop(t *testing.T) {
 		msgB := make([]byte, aminoMsgLength)
 		_, err := server.Read(msgB)
 		if err != nil {
-			t.Fatal(err)
+			t.Error(err)
+			return
 		}
 		errCh <- err
 	}()
diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go
index 76982ed97..9ab9695a3 100644
--- a/p2p/conn/secret_connection_test.go
+++ b/p2p/conn/secret_connection_test.go
@@ -192,7 +192,8 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn net.Conn, txt string, n in
 	for i := 0; i < n; i++ {
 		_, err := conn.Write([]byte(txt))
 		if err != nil {
-			t.Fatalf("Failed to write to fooSecConn: %v", err)
+			t.Errorf("Failed to write to fooSecConn: %v", err)
+			return
 		}
 	}
 }
@@ -408,7 +409,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
 			if err == io.EOF {
 				return
 			} else if err != nil {
-				b.Fatalf("Failed to read from barSecConn: %v", err)
+				b.Errorf("Failed to read from barSecConn: %v", err)
+				return
 			}
 		}
 	}()
@@ -418,7 +420,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
 		idx := cmn.RandIntn(len(fooWriteBytes))
 		_, err := fooSecConn.Write(fooWriteBytes[idx])
 		if err != nil {
-			b.Fatalf("Failed to write to fooSecConn: %v", err)
+			b.Errorf("Failed to write to fooSecConn: %v", err)
+			return
 		}
 	}
 	b.StopTimer()
@@ -451,7 +454,8 @@ func BenchmarkReadSecretConnection(b *testing.B) {
 			idx := cmn.RandIntn(len(fooWriteBytes))
 			_, err := fooSecConn.Write(fooWriteBytes[idx])
 			if err != nil {
-				b.Fatalf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+				b.Errorf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+				return
 			}
 		}
 	}()
diff --git a/p2p/switch_test.go b/p2p/switch_test.go
index aa5ca78bf..0879acc2d 100644
--- a/p2p/switch_test.go
+++ b/p2p/switch_test.go
@@ -16,7 +16,7 @@ import (
 	"testing"
 	"time"

-	stdprometheus "github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -348,7 +348,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 }

 func TestSwitchStopPeerForError(t *testing.T) {
-	s := httptest.NewServer(stdprometheus.UninstrumentedHandler())
+	s := httptest.NewServer(promhttp.Handler())
 	defer s.Close()

 	scrapeMetrics := func() string {
diff --git a/privval/signer_validator_endpoint_test.go b/privval/signer_validator_endpoint_test.go
index bf4c29930..611e743c9 100644
--- a/privval/signer_validator_endpoint_test.go
+++ b/privval/signer_validator_endpoint_test.go
@@ -331,9 +331,10 @@ func TestErrUnexpectedResponse(t *testing.T) {
 	// we do not want to Start() the remote signer here and instead use the connection to
 	// reply with intentionally wrong replies below:
 	rsConn, err := serviceEndpoint.connect()
-	defer rsConn.Close()
 	require.NoError(t, err)
 	require.NotNil(t, rsConn)
+	defer rsConn.Close()
+
 	// send over public key to get the remote signer running:
 	go testReadWriteResponse(t, &PubKeyResponse{}, rsConn)
 	<-readyCh
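Note: the Fatalf-to-Errorf changes in the test files above follow from the testing package's rule that FailNow (and therefore Fatal/Fatalf) may only be called from the goroutine running the test or benchmark function, which the linter flags. Inside spawned goroutines the failure is recorded with Errorf and the goroutine simply returns. A small sketch of the pattern with a made-up test and helper, not part of this patch:

package example

import (
	"sync"
	"testing"
)

// doWork is a stand-in for whatever the spawned goroutine exercises.
func doWork() error { return nil }

// TestWorker is a hypothetical test. Failures inside the goroutine are
// reported with t.Errorf followed by a plain return, so only the goroutine
// stops while the test is still marked as failed.
func TestWorker(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := doWork(); err != nil {
			t.Errorf("doWork failed: %v", err) // record the failure
			return                             // do not call t.Fatalf from here
		}
	}()
	wg.Wait()
}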
diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go
index 922016dd5..d02120e10 100644
--- a/rpc/grpc/client_server.go
+++ b/rpc/grpc/client_server.go
@@ -2,8 +2,8 @@ package core_grpc

 import (
 	"net"
-	"time"

+	"golang.org/x/net/context"
 	"google.golang.org/grpc"

 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -26,13 +26,13 @@ func StartGRPCServer(ln net.Listener) error {
 // StartGRPCClient dials the gRPC server using protoAddr and returns a new
 // BroadcastAPIClient.
 func StartGRPCClient(protoAddr string) BroadcastAPIClient {
-	conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 	if err != nil {
 		panic(err)
 	}
 	return NewBroadcastAPIClient(conn)
 }

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 	return cmn.Connect(addr)
 }
diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go
index e3b559569..05180c753 100644
--- a/rpc/lib/client/ws_client.go
+++ b/rpc/lib/client/ws_client.go
@@ -369,10 +369,11 @@ func (c *WSClient) writeRoutine() {
 	defer func() {
 		ticker.Stop()
-		if err := c.conn.Close(); err != nil {
-			// ignore error; it will trigger in tests
-			// likely because it's closing an already closed connection
-		}
+		c.conn.Close()
+		// err != nil {
+		// ignore error; it will trigger in tests
+		// likely because it's closing an already closed connection
+		// }
 		c.wg.Done()
 	}()
@@ -421,10 +422,11 @@
 // executing all reads from this goroutine.
 func (c *WSClient) readRoutine() {
 	defer func() {
-		if err := c.conn.Close(); err != nil {
-			// ignore error; it will trigger in tests
-			// likely because it's closing an already closed connection
-		}
+		c.conn.Close()
+		// err != nil {
+		// ignore error; it will trigger in tests
+		// likely because it's closing an already closed connection
+		// }
 		c.wg.Done()
 	}()
diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go
index e902fe21a..4f2cc9ada 100644
--- a/rpc/lib/client/ws_client_test.go
+++ b/rpc/lib/client/ws_client_test.go
@@ -212,7 +212,8 @@ func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) {
 		select {
 		case resp := <-c.ResponsesCh:
 			if resp.Error != nil {
-				t.Fatalf("unexpected error: %v", resp.Error)
+				t.Errorf("unexpected error: %v", resp.Error)
+				return
 			}
 			if resp.Result != nil {
 				wg.Done()
diff --git a/state/state_test.go b/state/state_test.go
index 29f76e27c..0512fbf38 100644
--- a/state/state_test.go
+++ b/state/state_test.go
@@ -185,11 +185,11 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
 	assert := assert.New(t)

 	// Can't load anything for height 0.
-	v, err := sm.LoadValidators(stateDB, 0)
+	_, err := sm.LoadValidators(stateDB, 0)
 	assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0")

 	// Should be able to load for height 1.
-	v, err = sm.LoadValidators(stateDB, 1)
+	v, err := sm.LoadValidators(stateDB, 1)
 	assert.Nil(err, "expected no err at height 1")
 	assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
diff --git a/tools/tm-monitor/mock/eventmeter.go b/tools/tm-monitor/mock/eventmeter.go
index 7bbedc7fa..7119c4399 100644
--- a/tools/tm-monitor/mock/eventmeter.go
+++ b/tools/tm-monitor/mock/eventmeter.go
@@ -54,7 +54,7 @@ func (c *RpcClient) Call(method string, params map[string]interface{}, result in
 	}

 	rv, rt := reflect.ValueOf(result), reflect.TypeOf(result)
-	rv, rt = rv.Elem(), rt.Elem()
+	rv, _ = rv.Elem(), rt.Elem()
 	rv.Set(reflect.ValueOf(s))
 	return s, nil
diff --git a/types/genesis_test.go b/types/genesis_test.go
index f977513e7..33bdd34c1 100644
--- a/types/genesis_test.go
+++ b/types/genesis_test.go
@@ -68,7 +68,7 @@ func TestGenesisGood(t *testing.T) {
 	genDoc.ConsensusParams.Block.MaxBytes = 0
 	genDocBytes, err = cdc.MarshalJSON(genDoc)
 	assert.NoError(t, err, "error marshalling genDoc")
-	genDoc, err = GenesisDocFromJSON(genDocBytes)
+	_, err = GenesisDocFromJSON(genDocBytes)
 	assert.Error(t, err, "expected error for genDoc json with block size of 0")

 	// Genesis doc from raw json
diff --git a/types/validator_set.go b/types/validator_set.go
index 65358714d..2078e7a95 100644
--- a/types/validator_set.go
+++ b/types/validator_set.go
@@ -619,10 +619,11 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
 		// Good precommit!
 		if blockID.Equals(precommit.BlockID) {
 			talliedVotingPower += val.VotingPower
-		} else {
-			// It's OK that the BlockID doesn't match. We include stray
-			// precommits to measure validator availability.
 		}
+		// else {
+		// It's OK that the BlockID doesn't match. We include stray
+		// precommits to measure validator availability.
+		// }
 	}

 	if talliedVotingPower > vals.TotalVotingPower()*2/3 {
@@ -703,10 +704,11 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 		// Good precommit!
 		if blockID.Equals(precommit.BlockID) {
 			oldVotingPower += val.VotingPower
-		} else {
-			// It's OK that the BlockID doesn't match. We include stray
-			// precommits to measure validator availability.
 		}
+		// else {
+		// It's OK that the BlockID doesn't match. We include stray
+		// precommits to measure validator availability.
+		// }
 	}

 	if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
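Note: several of the test fixes above (state_test.go, genesis_test.go, lite/proxy/query_test.go, pubsub_test.go) replace results that are never read with the blank identifier, since staticcheck reports assignments whose values are overwritten or never used. A short sketch of the idiom with hypothetical names, not taken from this patch:

package example

import "errors"

// load stands in for calls such as sm.LoadValidators, where sometimes only
// the returned error is interesting.
func load(height int64) (string, error) {
	if height == 0 {
		return "", errors.New("no value for height 0")
	}
	return "validators", nil
}

func check() error {
	// Only the error matters here, so the value is discarded with _ instead
	// of binding a variable that would never be read.
	_, err := load(0)
	if err == nil {
		return errors.New("expected an error at height 0")
	}

	// This call actually uses the value, so it declares v with := as usual.
	v, err := load(1)
	if err != nil {
		return err
	}
	if v != "validators" {
		return errors.New("unexpected value at height 1")
	}
	return nil
}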