Browse Source

errors: formatting cleanup (#7507)

pull/7511/head
Sam Kleinman 3 years ago
committed by GitHub
parent
commit
3c8955e4b8
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
47 changed files with 136 additions and 167 deletions
  1. +2
    -2
      abci/client/grpc_client.go
  2. +1
    -1
      abci/client/socket_client.go
  3. +5
    -10
      abci/example/example_test.go
  4. +2
    -2
      abci/example/kvstore/persistent_kvstore.go
  5. +1
    -1
      abci/server/grpc_server.go
  6. +3
    -3
      abci/server/socket_server.go
  7. +1
    -1
      cmd/tendermint/commands/reset_priv_validator.go
  8. +1
    -1
      cmd/tendermint/commands/root.go
  9. +3
    -3
      internal/blocksync/pool.go
  10. +2
    -2
      internal/consensus/byzantine_test.go
  11. +1
    -1
      internal/consensus/common_test.go
  12. +2
    -2
      internal/consensus/mempool_test.go
  13. +4
    -4
      internal/consensus/msgs.go
  14. +2
    -2
      internal/consensus/replay.go
  15. +2
    -2
      internal/consensus/replay_file.go
  16. +8
    -14
      internal/consensus/replay_test.go
  17. +3
    -3
      internal/consensus/state.go
  18. +2
    -4
      internal/consensus/types/height_vote_set_test.go
  19. +4
    -4
      internal/consensus/wal.go
  20. +1
    -1
      internal/evidence/pool.go
  21. +2
    -2
      internal/libs/autofile/group.go
  22. +1
    -1
      internal/libs/queue/queue_test.go
  23. +2
    -2
      internal/p2p/conn/connection.go
  24. +8
    -8
      internal/p2p/conn/secret_connection_test.go
  25. +12
    -30
      internal/proxy/app_conn_test.go
  26. +1
    -1
      internal/pubsub/pubsub.go
  27. +1
    -1
      internal/pubsub/query/query_test.go
  28. +1
    -1
      internal/pubsub/query/syntax/syntax_test.go
  29. +1
    -1
      internal/rpc/core/mempool.go
  30. +5
    -5
      internal/state/execution.go
  31. +4
    -4
      internal/state/indexer/sink/psql/psql_test.go
  32. +1
    -1
      internal/state/indexer/tx/kv/kv.go
  33. +2
    -2
      internal/state/state.go
  34. +4
    -4
      internal/store/store.go
  35. +1
    -1
      light/detector.go
  36. +4
    -4
      node/node.go
  37. +6
    -6
      privval/file.go
  38. +6
    -6
      rpc/jsonrpc/server/ws_handler.go
  39. +5
    -5
      scripts/json2wal/main.go
  40. +3
    -3
      scripts/wal2json/main.go
  41. +1
    -1
      test/e2e/runner/load.go
  42. +2
    -2
      test/e2e/runner/main.go
  43. +9
    -9
      types/block.go
  44. +1
    -1
      types/proposal.go
  45. +1
    -1
      types/validator_set.go
  46. +1
    -1
      types/vote.go
  47. +1
    -1
      types/vote_set_test.go

+ 2
- 2
abci/client/grpc_client.go View File

@ -156,9 +156,9 @@ func (cli *grpcClient) StopForError(err error) {
}
cli.mtx.Unlock()
cli.logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
if err := cli.Stop(); err != nil {
cli.logger.Error("Error stopping abci.grpcClient", "err", err)
cli.logger.Error("error stopping abci.grpcClient", "err", err)
}
}


+ 1
- 1
abci/client/socket_client.go View File

@ -592,6 +592,6 @@ func (cli *socketClient) stopForError(err error) {
cli.logger.Info("Stopping abci.socketClient", "reason", err)
if err := cli.Stop(); err != nil {
cli.logger.Error("Error stopping abci.socketClient", "err", err)
cli.logger.Error("error stopping abci.socketClient", "err", err)
}
}

+ 5
- 10
abci/example/example_test.go View File

@ -132,6 +132,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
t.Helper()
numDeliverTxs := 2000
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
@ -140,10 +141,7 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
// Start the listener
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
if err := server.Start(ctx); err != nil {
t.Fatalf("Error starting GRPC server: %v", err.Error())
}
require.NoError(t, server.Start(ctx))
t.Cleanup(func() { server.Wait() })
// Connect to the socket
@ -151,9 +149,7 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialerFunc),
)
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}
require.NoError(t, err, "Error dialing GRPC server")
t.Cleanup(func() {
if err := conn.Close(); err != nil {
@ -167,9 +163,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
require.NoError(t, err, "Error in GRPC DeliverTx")
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)


+ 2
- 2
abci/example/kvstore/persistent_kvstore.go View File

@ -113,7 +113,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("Error updating validators", "r", r)
app.logger.Error("error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
@ -271,7 +271,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
if err := types.WriteMessage(&v, value); err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Error encoding validator: %v", err)}
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
panic(err)


+ 1
- 1
abci/server/grpc_server.go View File

@ -58,7 +58,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
}()
if err := s.server.Serve(s.listener); err != nil {
s.logger.Error("Error serving gRPC server", "err", err)
s.logger.Error("error serving gRPC server", "err", err)
}
}()
return nil


+ 3
- 3
abci/server/socket_server.go View File

@ -61,7 +61,7 @@ func (s *SocketServer) OnStart(ctx context.Context) error {
func (s *SocketServer) OnStop() {
if err := s.listener.Close(); err != nil {
s.logger.Error("Error closing listener", "err", err)
s.logger.Error("error closing listener", "err", err)
}
s.connsMtx.Lock()
@ -70,7 +70,7 @@ func (s *SocketServer) OnStop() {
for id, conn := range s.conns {
delete(s.conns, id)
if err := conn.Close(); err != nil {
s.logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
}
}
}
@ -139,7 +139,7 @@ func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, c
defer func() {
// Close the connection
if err := s.rmConn(connID); err != nil {
s.logger.Error("Error closing connection", "err", err)
s.logger.Error("error closing connection", "err", err)
}
}()


+ 1
- 1
cmd/tendermint/commands/reset_priv_validator.go View File

@ -53,7 +53,7 @@ func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger)
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
// recreate the dbDir since the privVal state needs to live there
if err := tmos.EnsureDir(dbDir, 0700); err != nil {


+ 1
- 1
cmd/tendermint/commands/root.go View File

@ -36,7 +36,7 @@ func ParseConfig() (*cfg.Config, error) {
conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
if err := conf.ValidateBasic(); err != nil {
return nil, fmt.Errorf("error in config file: %v", err)
return nil, fmt.Errorf("error in config file: %w", err)
}
return conf, nil
}


+ 3
- 3
internal/blocksync/pool.go View File

@ -236,7 +236,7 @@ func (pool *BlockPool) PopRequest() {
if r := pool.requesters[pool.height]; r != nil {
if err := r.Stop(); err != nil {
pool.logger.Error("Error stopping requester", "err", err)
pool.logger.Error("error stopping requester", "err", err)
}
delete(pool.requesters, pool.height)
pool.height++
@ -425,7 +425,7 @@ func (pool *BlockPool) makeNextRequester(ctx context.Context) {
err := request.Start(ctx)
if err != nil {
request.logger.Error("Error starting request", "err", err)
request.logger.Error("error starting request", "err", err)
}
}
@ -677,7 +677,7 @@ OUTER_LOOP:
return
case <-bpr.pool.exitedCh:
if err := bpr.Stop(); err != nil {
bpr.logger.Error("Error stopped requester", "err", err)
bpr.logger.Error("error stopped requester", "err", err)
}
return
case peerID := <-bpr.redoCh:


+ 2
- 2
internal/consensus/byzantine_test.go View File

@ -196,7 +196,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
if lazyNodeState.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
// miss the opportunity to create a block.
lazyNodeState.logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
lazyNodeState.logger.Error("enterPropose", "err", errPubKeyIsNotSet)
return
}
proposerAddr := lazyNodeState.privValidatorPubKey.Address()
@ -209,7 +209,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
// and the privValidator will refuse to sign anything.
if err := lazyNodeState.wal.FlushAndSync(); err != nil {
lazyNodeState.logger.Error("Error flushing to disk")
lazyNodeState.logger.Error("error flushing to disk")
}
// Make proposal


+ 1
- 1
internal/consensus/common_test.go View File

@ -156,7 +156,7 @@ func signVote(
v, err := vs.signVote(ctx, cfg, voteType, hash, header)
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
panic(fmt.Errorf("failed to sign vote: %w", err))
}
vs.lastVote = v


+ 2
- 2
internal/consensus/mempool_test.go View File

@ -125,7 +125,7 @@ func deliverTxsRange(ctx context.Context, cs *State, start, end int) {
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{})
if err != nil {
panic(fmt.Sprintf("Error after CheckTx: %v", err))
panic(fmt.Errorf("error after CheckTx: %w", err))
}
}
}
@ -200,7 +200,7 @@ func TestMempoolRmBadTx(t *testing.T) {
checkTxRespCh <- struct{}{}
}, mempool.TxInfo{})
if err != nil {
t.Errorf("error after CheckTx: %v", err)
t.Errorf("error after CheckTx: %v", err)
return
}


+ 4
- 4
internal/consensus/msgs.go View File

@ -109,7 +109,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error {
return errors.New("negative Round")
}
if err := m.BlockPartSetHeader.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockPartSetHeader: %v", err)
return fmt.Errorf("wrong BlockPartSetHeader: %w", err)
}
if m.BlockParts.Size() == 0 {
return errors.New("empty blockParts")
@ -191,7 +191,7 @@ func (m *BlockPartMessage) ValidateBasic() error {
return errors.New("negative Round")
}
if err := m.Part.ValidateBasic(); err != nil {
return fmt.Errorf("wrong Part: %v", err)
return fmt.Errorf("wrong Part: %w", err)
}
return nil
}
@ -266,7 +266,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %v", err)
return fmt.Errorf("wrong BlockID: %w", err)
}
return nil
@ -296,7 +296,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %v", err)
return fmt.Errorf("wrong BlockID: %w", err)
}
// NOTE: Votes.Size() can be zero if the node does not have any


+ 2
- 2
internal/consensus/replay.go View File

@ -242,7 +242,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().InfoSync(ctx, proxy.RequestInfo)
if err != nil {
return fmt.Errorf("error calling Info: %v", err)
return fmt.Errorf("error calling Info: %w", err)
}
blockHeight := res.LastBlockHeight
@ -266,7 +266,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp)
if err != nil {
return fmt.Errorf("error on replay: %v", err)
return fmt.Errorf("error on replay: %w", err)
}
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",


+ 2
- 2
internal/consensus/replay_file.go View File

@ -74,7 +74,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := cs.eventBus.Unsubscribe(ctx, args); err != nil {
cs.logger.Error("Error unsubscribing to event bus", "err", err)
cs.logger.Error("error unsubscribing to event bus", "err", err)
}
}()
@ -237,7 +237,7 @@ func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) {
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil {
pb.cs.logger.Error("Error unsubscribing from eventBus", "err", err)
pb.cs.logger.Error("error unsubscribing from eventBus", "err", err)
}
}()


+ 8
- 14
internal/consensus/replay_test.go View File

@ -704,14 +704,14 @@ func TestMockProxyApp(t *testing.T) {
func tempWALWithData(data []byte) string {
walFile, err := os.CreateTemp("", "wal")
if err != nil {
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
panic(fmt.Errorf("failed to create temp WAL file: %w", err))
}
_, err = walFile.Write(data)
if err != nil {
panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
panic(fmt.Errorf("failed to write to temp WAL file: %w", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
panic(fmt.Errorf("failed to close temp WAL file: %w", err))
}
return walFile.Name()
}
@ -825,9 +825,7 @@ func testHandshakeReplay(
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
t.Cleanup(func() { cancel(); proxyApp.Wait() })
@ -835,9 +833,8 @@ func testHandshakeReplay(
if expectError {
require.Error(t, err)
return
} else if err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
require.NoError(t, err, "Error on abci handshake")
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync(ctx, abci.RequestInfo{Version: ""})
@ -1326,13 +1323,10 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
logger := log.TestingLogger()
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake")
if err := handshaker.Handshake(ctx, proxyApp); err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// reload the state, check the validator set was updated
state, err = stateStore.Load()
require.NoError(t, err)


+ 3
- 3
internal/consensus/state.go View File

@ -1509,7 +1509,7 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err))
panic(fmt.Errorf("precommit step; +2/3 prevoted for an invalid block: %w", err))
}
cs.LockedRound = round
@ -1815,7 +1815,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
if cs.privValidator != nil {
if cs.privValidatorPubKey == nil {
// Metrics won't be updated, but it's not critical.
cs.logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
cs.logger.Error("recordMetrics", "err", errPubKeyIsNotSet)
} else {
address = cs.privValidatorPubKey.Address()
}
@ -2336,7 +2336,7 @@ func (cs *State) signAddVote(ctx context.Context, msgType tmproto.SignedMsgType,
if cs.privValidatorPubKey == nil {
// Vote won't be signed, but it's not critical.
cs.logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
cs.logger.Error("signAddVote", "err", errPubKeyIsNotSet)
return nil
}


+ 2
- 4
internal/consensus/types/height_vote_set_test.go View File

@ -2,10 +2,10 @@ package types
import (
"context"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test/factory"
@ -92,9 +92,7 @@ func makeVoteHR(
v := vote.ToProto()
err = privVal.SignVote(ctx, chainID, v)
if err != nil {
panic(fmt.Sprintf("Error signing vote: %v", err))
}
require.NoError(t, err, "Error signing vote")
vote.Signature = v.Signature


+ 4
- 4
internal/consensus/wal.go View File

@ -194,7 +194,7 @@ func (wal *BaseWAL) Write(msg WALMessage) error {
}
if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
wal.logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
wal.logger.Error("error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
"err", err, "msg", msg)
return err
}
@ -377,14 +377,14 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, err
}
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %w", err)}
}
crc := binary.BigEndian.Uint32(b)
b = make([]byte, 4)
_, err = dec.rd.Read(b)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %w", err)}
}
length := binary.BigEndian.Uint32(b)
@ -410,7 +410,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
var res = new(tmcons.TimedWALMessage)
err = proto.Unmarshal(data, res)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %w", err)}
}
walMsg, err := WALFromProto(res.Msg)


+ 1
- 1
internal/evidence/pool.go View File

@ -382,7 +382,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide
iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixKey))
if err != nil {
return nil, totalSize, fmt.Errorf("database error: %v", err)
return nil, totalSize, fmt.Errorf("database error: %w", err)
}
defer iter.Close()


+ 2
- 2
internal/libs/autofile/group.go View File

@ -150,7 +150,7 @@ func (g *Group) OnStart(ctx context.Context) error {
func (g *Group) OnStop() {
g.ticker.Stop()
if err := g.FlushAndSync(); err != nil {
g.logger.Error("Error flushing to disk", "err", err)
g.logger.Error("error flushing to disk", "err", err)
}
}
@ -164,7 +164,7 @@ func (g *Group) Wait() {
// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
if err := g.FlushAndSync(); err != nil {
g.logger.Error("Error flushing to disk", "err", err)
g.logger.Error("error flushing to disk", "err", err)
}
g.mtx.Lock()


+ 1
- 1
internal/libs/queue/queue_test.go View File

@ -161,7 +161,7 @@ func TestWait(t *testing.T) {
defer close(done)
got, err := q.Wait(context.Background())
if err != nil {
t.Errorf("Wait: unexpected error: %v", err)
t.Errorf("Wait: unexpected error: %v", err)
} else if got != input {
t.Errorf("Wait: got %q, want %q", got, input)
}


+ 2
- 2
internal/p2p/conn/connection.go View File

@ -295,7 +295,7 @@ func (c *MConnection) _recover(ctx context.Context) {
func (c *MConnection) stopForError(ctx context.Context, r interface{}) {
if err := c.Stop(); err != nil {
c.logger.Error("Error stopping connection", "err", err)
c.logger.Error("error stopping connection", "err", err)
}
if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
@ -490,7 +490,7 @@ FOR_LOOP:
if err == nil {
// return
} else {
c.logger.Debug("Error peeking connection buffer", "err", err)
c.logger.Debug("error peeking connection buffer", "err", err)
// return nil
}
c.logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)


+ 8
- 8
internal/p2p/conn/secret_connection_test.go View File

@ -125,7 +125,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
nodePrvKey := ed25519.GenPrivKey()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("failed to establish SecretConnection for node: %v", err)
t.Errorf("failed to establish SecretConnection for node: %v", err)
return nil, true, err
}
// In parallel, handle some reads and writes.
@ -135,7 +135,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
t.Errorf("failed to write to nodeSecretConn: %v", err)
t.Errorf("failed to write to nodeSecretConn: %v", err)
return nil, true, err
}
if n != len(nodeWrite) {
@ -162,7 +162,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
}
return nil, false, nil
} else if err != nil {
t.Errorf("failed to read from nodeSecretConn: %v", err)
t.Errorf("failed to read from nodeSecretConn: %v", err)
return nil, true, err
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
@ -287,7 +287,7 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i
for i := 0; i < n; i++ {
_, err := conn.Write([]byte(txt))
if err != nil {
t.Errorf("failed to write to fooSecConn: %v", err)
t.Errorf("failed to write to fooSecConn: %v", err)
return
}
}
@ -342,7 +342,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil {
tb.Errorf("failed to establish SecretConnection for foo: %v", err)
tb.Errorf("failed to establish SecretConnection for foo: %v", err)
return nil, true, err
}
remotePubBytes := fooSecConn.RemotePubKey()
@ -357,7 +357,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil {
tb.Errorf("failed to establish SecretConnection for bar: %v", err)
tb.Errorf("failed to establish SecretConnection for bar: %v", err)
return nil, true, err
}
remotePubBytes := barSecConn.RemotePubKey()
@ -404,7 +404,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
if err == io.EOF {
return
} else if err != nil {
b.Errorf("failed to read from barSecConn: %v", err)
b.Errorf("failed to read from barSecConn: %v", err)
return
}
}
@ -415,7 +415,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
idx := mrand.Intn(len(fooWriteBytes))
_, err := fooSecConn.Write(fooWriteBytes[idx])
if err != nil {
b.Errorf("failed to write to fooSecConn: %v", err)
b.Errorf("failed to write to fooSecConn: %v", err)
return
}
}


+ 12
- 30
internal/proxy/app_conn_test.go View File

@ -6,6 +6,7 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
@ -56,20 +57,14 @@ func TestEcho(t *testing.T) {
// Start server
s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication())
if err := s.Start(ctx); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
require.NoError(t, s.Start(ctx), "error starting socket server")
t.Cleanup(func() { cancel(); s.Wait() })
// Start client
cli, err := clientCreator(logger.With("module", "abci-client"))
if err != nil {
t.Fatalf("Error creating ABCI client: %v", err.Error())
}
require.NoError(t, err, "Error creating ABCI client:")
if err := cli.Start(ctx); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
require.NoError(t, cli.Start(ctx), "Error starting ABCI client")
proxy := newAppConnTest(cli)
t.Log("Connected")
@ -102,20 +97,14 @@ func BenchmarkEcho(b *testing.B) {
// Start server
s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication())
if err := s.Start(ctx); err != nil {
b.Fatalf("Error starting socket server: %v", err.Error())
}
require.NoError(b, s.Start(ctx), "Error starting socket server")
b.Cleanup(func() { cancel(); s.Wait() })
// Start client
cli, err := clientCreator(logger.With("module", "abci-client"))
if err != nil {
b.Fatalf("Error creating ABCI client: %v", err.Error())
}
require.NoError(b, err, "Error creating ABCI client")
if err := cli.Start(ctx); err != nil {
b.Fatalf("Error starting ABCI client: %v", err.Error())
}
require.NoError(b, cli.Start(ctx), "Error starting ABCI client")
proxy := newAppConnTest(cli)
b.Log("Connected")
@ -153,28 +142,21 @@ func TestInfo(t *testing.T) {
// Start server
s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication())
if err := s.Start(ctx); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
require.NoError(t, s.Start(ctx), "Error starting socket server")
t.Cleanup(func() { cancel(); s.Wait() })
// Start client
cli, err := clientCreator(logger.With("module", "abci-client"))
if err != nil {
t.Fatalf("Error creating ABCI client: %v", err.Error())
}
require.NoError(t, err, "Error creating ABCI client")
if err := cli.Start(ctx); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
require.NoError(t, cli.Start(ctx), "Error starting ABCI client")
proxy := newAppConnTest(cli)
t.Log("Connected")
resInfo, err := proxy.InfoSync(context.Background(), RequestInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
require.NoError(t, err)
if resInfo.Data != "{\"size\":0}" {
t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else")
}


+ 1
- 1
internal/pubsub/pubsub.go View File

@ -385,7 +385,7 @@ func (s *Server) run(ctx context.Context) {
// Sender: Service the queue and forward messages to subscribers.
for it := range queue {
if err := s.send(it.Data, it.Events); err != nil {
s.logger.Error("Error sending event", "err", err)
s.logger.Error("error sending event", "err", err)
}
}
// Terminate all subscribers before exit.


+ 1
- 1
internal/pubsub/query/query_test.go View File

@ -233,7 +233,7 @@ func TestAllMatchesAll(t *testing.T) {
for i := 0; i < len(events); i++ {
match, err := query.All.Matches(events[:i])
if err != nil {
t.Errorf("Matches failed: %v", err)
t.Errorf("Matches failed: %v", err)
} else if !match {
t.Errorf("Did not match on %+v ", events[:i])
}


+ 1
- 1
internal/pubsub/query/syntax/syntax_test.go View File

@ -55,7 +55,7 @@ func TestScanner(t *testing.T) {
got = append(got, s.Token())
}
if err := s.Err(); err != io.EOF {
t.Errorf("Next: unexpected error: %v", err)
t.Errorf("Next: unexpected error: %v", err)
}
if !reflect.DeepEqual(got, test.want) {


+ 1
- 1
internal/rpc/core/mempool.go View File

@ -90,7 +90,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
count++
select {
case <-ctx.Context().Done():
env.Logger.Error("Error on broadcastTxCommit",
env.Logger.Error("error on broadcastTxCommit",
"duration", time.Since(startAt),
"err", err)
return &coretypes.ResultBroadcastTxCommit{


+ 5
- 5
internal/state/execution.go View File

@ -184,7 +184,7 @@ func (blockExec *BlockExecutor) ApplyBlock(
abciValUpdates := abciResponses.EndBlock.ValidatorUpdates
err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator)
if err != nil {
return state, fmt.Errorf("error in validator updates: %v", err)
return state, fmt.Errorf("error in validator updates: %w", err)
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates)
@ -198,13 +198,13 @@ func (blockExec *BlockExecutor) ApplyBlock(
// Update the state with the block and responses.
state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
if err != nil {
return state, fmt.Errorf("commit failed for application: %v", err)
return state, fmt.Errorf("commit failed for application: %w", err)
}
// Lock mempool, commit app state, update mempoool.
appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.DeliverTxs)
if err != nil {
return state, fmt.Errorf("commit failed for application: %v", err)
return state, fmt.Errorf("commit failed for application: %w", err)
}
// Update evpool with the latest state.
@ -460,7 +460,7 @@ func updateState(
if len(validatorUpdates) > 0 {
err := nValSet.UpdateWithChangeSet(validatorUpdates)
if err != nil {
return state, fmt.Errorf("error changing validator set: %v", err)
return state, fmt.Errorf("error changing validator set: %w", err)
}
// Change results from this height but only applies to the next next height.
lastHeightValsChanged = header.Height + 1 + 1
@ -477,7 +477,7 @@ func updateState(
nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates)
err := nextParams.ValidateConsensusParams()
if err != nil {
return state, fmt.Errorf("error updating consensus params: %v", err)
return state, fmt.Errorf("error updating consensus params: %w", err)
}
state.Version.Consensus.App = nextParams.Version.AppVersion


+ 4
- 4
internal/state/indexer/sink/psql/psql_test.go View File

@ -246,11 +246,11 @@ func readSchema() ([]*schema.Migration, error) {
func resetDatabase(db *sql.DB) error {
_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
if err != nil {
return fmt.Errorf("dropping tables: %v", err)
return fmt.Errorf("dropping tables: %w", err)
}
_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
if err != nil {
return fmt.Errorf("dropping views: %v", err)
return fmt.Errorf("dropping views: %w", err)
}
return nil
}
@ -282,7 +282,7 @@ SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
txr := new(abci.TxResult)
if err := proto.Unmarshal(resultData, txr); err != nil {
return nil, fmt.Errorf("unmarshaling txr: %v", err)
return nil, fmt.Errorf("unmarshaling txr: %w", err)
}
return txr, nil
@ -313,7 +313,7 @@ SELECT type, height, chain_id FROM `+viewBlockEvents+`
`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
t.Fatalf("Database query failed: %v", err)
}
if err := testDB().QueryRow(`


+ 1
- 1
internal/state/indexer/tx/kv/kv.go View File

@ -53,7 +53,7 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) {
txResult := new(abci.TxResult)
err = proto.Unmarshal(rawBytes, txResult)
if err != nil {
return nil, fmt.Errorf("error reading TxResult: %v", err)
return nil, fmt.Errorf("error reading TxResult: %w", err)
}
return txResult, nil


+ 2
- 2
internal/state/state.go View File

@ -330,11 +330,11 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) {
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) {
genDocJSON, err := os.ReadFile(genDocFile)
if err != nil {
return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err)
return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err)
}
genDoc, err := types.GenesisDocFromJSON(genDocJSON)
if err != nil {
return nil, fmt.Errorf("error reading GenesisDoc: %v", err)
return nil, fmt.Errorf("error reading GenesisDoc: %w", err)
}
return genDoc, nil
}


+ 4
- 4
internal/store/store.go View File

@ -149,7 +149,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
if err != nil {
// NOTE: The existence of meta should imply the existence of the
// block. So, make sure meta is only saved after blocks are saved.
panic(fmt.Sprintf("Error reading block: %v", err))
panic(fmt.Errorf("error reading block: %w", err))
}
block, err := types.BlockFromProto(pbb)
@ -221,7 +221,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
}
part, err := types.PartFromProto(pbpart)
if err != nil {
panic(fmt.Sprintf("Error reading block part: %v", err))
panic(fmt.Errorf("error reading block part: %w", err))
}
return part
@ -273,7 +273,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
}
commit, err := types.CommitFromProto(pbc)
if err != nil {
panic(fmt.Sprintf("Error reading block commit: %v", err))
panic(fmt.Errorf("error reading block commit: %w", err))
}
return commit
}
@ -293,7 +293,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit {
}
err = proto.Unmarshal(bz, pbc)
if err != nil {
panic(fmt.Sprintf("error reading block seen commit: %v", err))
panic(fmt.Errorf("error reading block seen commit: %w", err))
}
commit, err := types.CommitFromProto(pbc)


+ 1
- 1
light/detector.go View File

@ -257,7 +257,7 @@ func (c *Client) handleConflictingHeaders(
now,
)
if err != nil {
c.logger.Info("Error validating primary's divergent header", "primary", c.primary, "err", err)
c.logger.Info("error validating primary's divergent header", "primary", c.primary, "err", err)
return ErrLightClientAttack
}


+ 4
- 4
node/node.go View File

@ -173,7 +173,7 @@ func makeNode(
// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), nodeMetrics.proxy)
if err := proxyApp.Start(ctx); err != nil {
return nil, fmt.Errorf("error starting proxy app connections: %v", err)
return nil, fmt.Errorf("error starting proxy app connections: %w", err)
}
// EventBus and IndexerService must be started before the handshake because
@ -703,7 +703,7 @@ func (n *nodeImpl) OnStop() {
for _, l := range n.rpcListeners {
n.logger.Info("Closing rpc listener", "listener", l)
if err := l.Close(); err != nil {
n.logger.Error("Error closing listener", "listener", l, "err", err)
n.logger.Error("error closing listener", "listener", l, "err", err)
}
}
@ -811,7 +811,7 @@ func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {
rpcLogger,
cfg,
); err != nil {
n.logger.Error("Error serving server with TLS", "err", err)
n.logger.Error("error serving server with TLS", "err", err)
}
}()
} else {
@ -823,7 +823,7 @@ func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {
rpcLogger,
cfg,
); err != nil {
n.logger.Error("Error serving server", "err", err)
n.logger.Error("error serving server", "err", err)
}
}()
}


+ 6
- 6
privval/file.go View File

@ -267,7 +267,7 @@ func (pv *FilePV) GetPubKey(ctx context.Context) (crypto.PubKey, error) {
// chainID. Implements PrivValidator.
func (pv *FilePV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error {
if err := pv.signVote(chainID, vote); err != nil {
return fmt.Errorf("error signing vote: %v", err)
return fmt.Errorf("error signing vote: %w", err)
}
return nil
}
@ -276,7 +276,7 @@ func (pv *FilePV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vo
// the chainID. Implements PrivValidator.
func (pv *FilePV) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error {
if err := pv.signProposal(chainID, proposal); err != nil {
return fmt.Errorf("error signing proposal: %v", err)
return fmt.Errorf("error signing proposal: %w", err)
}
return nil
}
@ -435,10 +435,10 @@ func (pv *FilePV) saveSigned(height int64, round int32, step int8, signBytes []b
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool, error) {
var lastVote, newVote tmproto.CanonicalVote
if err := protoio.UnmarshalDelimited(lastSignBytes, &lastVote); err != nil {
return time.Time{}, false, fmt.Errorf("LastSignBytes cannot be unmarshalled into vote: %v", err)
return time.Time{}, false, fmt.Errorf("LastSignBytes cannot be unmarshalled into vote: %w", err)
}
if err := protoio.UnmarshalDelimited(newSignBytes, &newVote); err != nil {
return time.Time{}, false, fmt.Errorf("signBytes cannot be unmarshalled into vote: %v", err)
return time.Time{}, false, fmt.Errorf("signBytes cannot be unmarshalled into vote: %w", err)
}
lastTime := lastVote.Timestamp
@ -455,10 +455,10 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool, error) {
var lastProposal, newProposal tmproto.CanonicalProposal
if err := protoio.UnmarshalDelimited(lastSignBytes, &lastProposal); err != nil {
return time.Time{}, false, fmt.Errorf("LastSignBytes cannot be unmarshalled into proposal: %v", err)
return time.Time{}, false, fmt.Errorf("LastSignBytes cannot be unmarshalled into proposal: %w", err)
}
if err := protoio.UnmarshalDelimited(newSignBytes, &newProposal); err != nil {
return time.Time{}, false, fmt.Errorf("signBytes cannot be unmarshalled into proposal: %v", err)
return time.Time{}, false, fmt.Errorf("signBytes cannot be unmarshalled into proposal: %w", err)
}
lastTime := lastProposal.Timestamp


+ 6
- 6
rpc/jsonrpc/server/ws_handler.go View File

@ -305,7 +305,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
}
wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack()))
if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil {
wsc.Logger.Error("Error writing RPC response", "err", err)
wsc.Logger.Error("error writing RPC response", "err", err)
}
go wsc.readRoutine(ctx)
}
@ -333,7 +333,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
wsc.Logger.Error("Failed to read request", "err", err)
}
if err := wsc.Stop(); err != nil {
wsc.Logger.Error("Error closing websocket connection", "err", err)
wsc.Logger.Error("error closing websocket connection", "err", err)
}
close(wsc.readRoutineQuit)
return
@ -345,7 +345,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
if err != nil {
if err := wsc.WriteRPCResponse(writeCtx,
rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil {
wsc.Logger.Error("Error writing RPC response", "err", err)
wsc.Logger.Error("error writing RPC response", "err", err)
}
continue
}
@ -364,7 +364,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
rpcFunc := wsc.funcMap[request.Method]
if rpcFunc == nil {
if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil {
wsc.Logger.Error("Error writing RPC response", "err", err)
wsc.Logger.Error("error writing RPC response", "err", err)
}
continue
}
@ -377,7 +377,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
if err := wsc.WriteRPCResponse(writeCtx,
rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)),
); err != nil {
wsc.Logger.Error("Error writing RPC response", "err", err)
wsc.Logger.Error("error writing RPC response", "err", err)
}
continue
}
@ -414,7 +414,7 @@ func (wsc *wsConnection) readRoutine(ctx context.Context) {
}
if err := wsc.WriteRPCResponse(writeCtx, resp); err != nil {
wsc.Logger.Error("Error writing RPC response", "err", err)
wsc.Logger.Error("error writing RPC response", "err", err)
}
}


+ 5
- 5
scripts/json2wal/main.go View File

@ -27,13 +27,13 @@ func main() {
f, err := os.Open(os.Args[1])
if err != nil {
panic(fmt.Errorf("failed to open WAL file: %v", err))
panic(fmt.Errorf("failed to open WAL file: %w", err))
}
defer f.Close()
walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
panic(fmt.Errorf("failed to open WAL file: %v", err))
panic(fmt.Errorf("failed to open WAL file: %w", err))
}
defer walFile.Close()
@ -48,7 +48,7 @@ func main() {
if err == io.EOF {
break
} else if err != nil {
panic(fmt.Errorf("failed to read file: %v", err))
panic(fmt.Errorf("failed to read file: %w", err))
}
// ignore the ENDHEIGHT in json.File
if strings.HasPrefix(string(msgJSON), "ENDHEIGHT") {
@ -58,12 +58,12 @@ func main() {
var msg consensus.TimedWALMessage
err = tmjson.Unmarshal(msgJSON, &msg)
if err != nil {
panic(fmt.Errorf("failed to unmarshal json: %v", err))
panic(fmt.Errorf("failed to unmarshal json: %w", err))
}
err = dec.Encode(&msg)
if err != nil {
panic(fmt.Errorf("failed to encode msg: %v", err))
panic(fmt.Errorf("failed to encode msg: %w", err))
}
}
}

+ 3
- 3
scripts/wal2json/main.go View File

@ -24,7 +24,7 @@ func main() {
f, err := os.Open(os.Args[1])
if err != nil {
panic(fmt.Errorf("failed to open WAL file: %v", err))
panic(fmt.Errorf("failed to open WAL file: %w", err))
}
defer f.Close()
@ -34,12 +34,12 @@ func main() {
if err == io.EOF {
break
} else if err != nil {
panic(fmt.Errorf("failed to decode msg: %v", err))
panic(fmt.Errorf("failed to decode msg: %w", err))
}
json, err := tmjson.Marshal(msg)
if err != nil {
panic(fmt.Errorf("failed to marshal msg: %v", err))
panic(fmt.Errorf("failed to marshal msg: %w", err))
}
_, err = os.Stdout.Write(json)


+ 1
- 1
test/e2e/runner/load.go View File

@ -104,7 +104,7 @@ func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSiz
bz := make([]byte, txSize)
_, err := r.Read(bz)
if err != nil {
panic(fmt.Sprintf("Failed to read random bytes: %v", err))
panic(fmt.Errorf("failed to read random bytes: %w", err))
}
tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz))


+ 2
- 2
test/e2e/runner/main.go View File

@ -61,7 +61,7 @@ func NewCLI() *CLI {
logger.Info("Preserving testnet that encountered error",
"err", err)
} else if err := Cleanup(cli.testnet); err != nil {
logger.Error("Error cleaning up testnet contents", "err", err)
logger.Error("error cleaning up testnet contents", "err", err)
}
}()
if err = Setup(cli.testnet); err != nil {
@ -302,7 +302,7 @@ Does not run any perbutations.
}
defer func() {
if err := Cleanup(cli.testnet); err != nil {
logger.Error("Error cleaning up testnet contents", "err", err)
logger.Error("error cleaning up testnet contents", "err", err)
}
}()


+ 9
- 9
types/block.go View File

@ -68,7 +68,7 @@ func (b *Block) ValidateBasic() error {
return errors.New("nil LastCommit")
}
if err := b.LastCommit.ValidateBasic(); err != nil {
return fmt.Errorf("wrong LastCommit: %v", err)
return fmt.Errorf("wrong LastCommit: %w", err)
}
if w, g := b.LastCommit.Hash(), b.LastCommitHash; !bytes.Equal(w, g) {
@ -402,15 +402,15 @@ func (h Header) ValidateBasic() error {
}
if err := ValidateHash(h.LastCommitHash); err != nil {
return fmt.Errorf("wrong LastCommitHash: %v", err)
return fmt.Errorf("wrong LastCommitHash: %w", err)
}
if err := ValidateHash(h.DataHash); err != nil {
return fmt.Errorf("wrong DataHash: %v", err)
return fmt.Errorf("wrong DataHash: %w", err)
}
if err := ValidateHash(h.EvidenceHash); err != nil {
return fmt.Errorf("wrong EvidenceHash: %v", err)
return fmt.Errorf("wrong EvidenceHash: %w", err)
}
if len(h.ProposerAddress) != crypto.AddressSize {
@ -423,17 +423,17 @@ func (h Header) ValidateBasic() error {
// Basic validation of hashes related to application data.
// Will validate fully against state in state#ValidateBlock.
if err := ValidateHash(h.ValidatorsHash); err != nil {
return fmt.Errorf("wrong ValidatorsHash: %v", err)
return fmt.Errorf("wrong ValidatorsHash: %w", err)
}
if err := ValidateHash(h.NextValidatorsHash); err != nil {
return fmt.Errorf("wrong NextValidatorsHash: %v", err)
return fmt.Errorf("wrong NextValidatorsHash: %w", err)
}
if err := ValidateHash(h.ConsensusHash); err != nil {
return fmt.Errorf("wrong ConsensusHash: %v", err)
return fmt.Errorf("wrong ConsensusHash: %w", err)
}
// NOTE: AppHash is arbitrary length
if err := ValidateHash(h.LastResultsHash); err != nil {
return fmt.Errorf("wrong LastResultsHash: %v", err)
return fmt.Errorf("wrong LastResultsHash: %w", err)
}
return nil
@ -781,7 +781,7 @@ func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSe
}
added, err := voteSet.AddVote(commit.GetVote(int32(idx)))
if !added || err != nil {
panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
panic(fmt.Errorf("failed to reconstruct LastCommit: %w", err))
}
}
return voteSet


+ 1
- 1
types/proposal.go View File

@ -60,7 +60,7 @@ func (p *Proposal) ValidateBasic() error {
return errors.New("negative POLRound (exception: -1)")
}
if err := p.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %v", err)
return fmt.Errorf("wrong BlockID: %w", err)
}
// ValidateBasic above would pass even if the BlockID was empty:
if !p.BlockID.IsComplete() {


+ 1
- 1
types/validator_set.go View File

@ -71,7 +71,7 @@ func NewValidatorSet(valz []*Validator) *ValidatorSet {
vals := &ValidatorSet{}
err := vals.updateWithChangeSet(valz, false)
if err != nil {
panic(fmt.Sprintf("Cannot create validator set: %v", err))
panic(fmt.Errorf("cannot create validator set: %w", err))
}
if len(valz) > 0 {
vals.IncrementProposerPriority(1)


+ 1
- 1
types/vote.go View File

@ -172,7 +172,7 @@ func (vote *Vote) ValidateBasic() error {
// NOTE: Timestamp validation is subtle and handled elsewhere.
if err := vote.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %v", err)
return fmt.Errorf("wrong BlockID: %w", err)
}
// BlockID.ValidateBasic would not err if we for instance have an empty hash but a


+ 1
- 1
types/vote_set_test.go View File

@ -472,7 +472,7 @@ func TestVoteSet_MakeCommit(t *testing.T) {
// Ensure that Commit is good.
if err := commit.ValidateBasic(); err != nil {
t.Errorf("error in Commit.ValidateBasic(): %v", err)
t.Errorf("error in Commit.ValidateBasic(): %w", err)
}
}


Loading…
Cancel
Save