Browse Source

linting errors: clean it all up

pull/703/head
Zach Ramsay 7 years ago
committed by Ethan Buchman
parent
commit
8f0237610e
12 changed files with 25 additions and 34 deletions
  1. +1
    -1
      blockchain/pool.go
  2. +1
    -1
      cmd/tendermint/commands/reset_priv_validator.go
  3. +4
    -4
      config/toml.go
  4. +4
    -1
      consensus/reactor.go
  5. +3
    -7
      consensus/replay_file.go
  6. +1
    -1
      consensus/state.go
  7. +2
    -3
      mempool/mempool.go
  8. +1
    -1
      p2p/upnp/probe.go
  9. +4
    -2
      rpc/lib/client/ws_client.go
  10. +1
    -1
      rpc/lib/server/handlers.go
  11. +2
    -8
      rpc/lib/server/http_server.go
  12. +1
    -4
      types/part_set.go

+ 1
- 1
blockchain/pool.go View File

@@ -313,7 +313,7 @@ func (pool *BlockPool) makeNextRequester() {
_, err := request.Start() _, err := request.Start()
if err != nil { if err != nil {
panic(err)
pool.Logger.Error("Error starting block pool", "err", err)
} }
} }


+ 1
- 1
cmd/tendermint/commands/reset_priv_validator.go View File

@@ -48,7 +48,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
func ResetAll(dbDir, privValFile string, logger log.Logger) { func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorLocal(privValFile, logger) resetPrivValidatorLocal(privValFile, logger)
if err := os.RemoveAll(dbDir); err != nil { if err := os.RemoveAll(dbDir); err != nil {
panic(err)
logger.Error("Error removing directory", "err", err)
} }
logger.Info("Removed all data", "dir", dbDir) logger.Info("Removed all data", "dir", dbDir)
} }


+ 4
- 4
config/toml.go View File

@@ -13,10 +13,10 @@ import (
func EnsureRoot(rootDir string) { func EnsureRoot(rootDir string) {
if err := cmn.EnsureDir(rootDir, 0700); err != nil { if err := cmn.EnsureDir(rootDir, 0700); err != nil {
panic(err)
cmn.PanicSanity(err.Error())
} }
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil { if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
panic(err)
cmn.PanicSanity(err.Error())
} }
configFilePath := path.Join(rootDir, "config.toml") configFilePath := path.Join(rootDir, "config.toml")
@@ -69,10 +69,10 @@ func ResetTestRoot(testName string) *Config {
} }
// Create new dir // Create new dir
if err := cmn.EnsureDir(rootDir, 0700); err != nil { if err := cmn.EnsureDir(rootDir, 0700); err != nil {
panic(err)
cmn.PanicSanity(err.Error())
} }
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil { if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
panic(err)
cmn.PanicSanity(err.Error())
} }
configFilePath := path.Join(rootDir, "config.toml") configFilePath := path.Join(rootDir, "config.toml")


+ 4
- 1
consensus/reactor.go View File

@@ -97,7 +97,10 @@ func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced in
// dont bother with the WAL if we fast synced // dont bother with the WAL if we fast synced
conR.conS.doWALCatchup = false conR.conS.doWALCatchup = false
} }
conR.conS.Start()
_, err := conR.conS.Start()
if err != nil {
conR.Logger.Error("Error starting conR", "err", err)
}
} }
// GetChannels implements Reactor // GetChannels implements Reactor


+ 3
- 7
consensus/replay_file.go View File

@@ -65,11 +65,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
} }
pb := newPlayback(file, fp, cs, cs.state.Copy()) pb := newPlayback(file, fp, cs, cs.state.Copy())
defer func() {
if err := pb.fp.Close(); err != nil {
return
}
}()
defer pb.fp.Close()
var nextN int // apply N msgs in a row var nextN int // apply N msgs in a row
var msg *TimedWALMessage var msg *TimedWALMessage
@@ -227,7 +223,7 @@ func (pb *playback) replayConsoleLoop() int {
if len(tokens) == 1 { if len(tokens) == 1 {
if err := pb.replayReset(1, newStepCh); err != nil { if err := pb.replayReset(1, newStepCh); err != nil {
panic(err)
pb.cs.Logger.Error("Replay reset error", "err", err)
} }
} else { } else {
i, err := strconv.Atoi(tokens[1]) i, err := strconv.Atoi(tokens[1])
@@ -237,7 +233,7 @@ func (pb *playback) replayConsoleLoop() int {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else { } else {
if err := pb.replayReset(i, newStepCh); err != nil { if err := pb.replayReset(i, newStepCh); err != nil {
panic(err)
pb.cs.Logger.Error("Replay reset error", "err", err)
} }
} }
} }


+ 1
- 1
consensus/state.go View File

@@ -259,7 +259,7 @@ func (cs *ConsensusState) OnStart() error {
func (cs *ConsensusState) startRoutines(maxSteps int) { func (cs *ConsensusState) startRoutines(maxSteps int) {
_, err := cs.timeoutTicker.Start() _, err := cs.timeoutTicker.Start()
if err != nil { if err != nil {
panic(err)
cs.Logger.Error("Error starting timeout ticker", "err", err)
} }
go cs.receiveRoutine(maxSteps) go cs.receiveRoutine(maxSteps)
} }


+ 2
- 3
mempool/mempool.go View File

@@ -3,7 +3,6 @@ package mempool
import ( import (
"bytes" "bytes"
"container/list" "container/list"
"fmt"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@@ -192,11 +191,11 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
// TODO: Notify administrators when WAL fails // TODO: Notify administrators when WAL fails
_, err := mem.wal.Write([]byte(tx)) _, err := mem.wal.Write([]byte(tx))
if err != nil { if err != nil {
mem.logger.Error(fmt.Sprintf("Error writing to WAL: %v", err))
mem.logger.Error("Error writing to WAL", "err", err)
} }
_, err = mem.wal.Write([]byte("\n")) _, err = mem.wal.Write([]byte("\n"))
if err != nil { if err != nil {
mem.logger.Error(fmt.Sprintf("Error writing to WAL: %v", err))
mem.logger.Error("Error writing to WAL", "err", err)
} }
} }
// END WAL // END WAL


+ 1
- 1
p2p/upnp/probe.go View File

@@ -101,7 +101,7 @@ func Probe(logger log.Logger) (caps UPNPCapabilities, err error) {
logger.Error(cmn.Fmt("Port mapping delete error: %v", err)) logger.Error(cmn.Fmt("Port mapping delete error: %v", err))
} }
if err := listener.Close(); err != nil { if err := listener.Close(); err != nil {
panic(err)
logger.Error(cmn.Fmt("Listener closing error: %v", err))
} }
}() }()


+ 4
- 2
rpc/lib/client/ws_client.go View File

@@ -353,7 +353,8 @@ func (c *WSClient) writeRoutine() {
defer func() { defer func() {
ticker.Stop() ticker.Stop()
if err := c.conn.Close(); err != nil { if err := c.conn.Close(); err != nil {
// panic(err) FIXME: this panic will trigger in tests
// ignore error; it will trigger in tests
// likely because it's closing and already closed connection
} }
c.wg.Done() c.wg.Done()
}() }()
@@ -404,7 +405,8 @@ func (c *WSClient) writeRoutine() {
func (c *WSClient) readRoutine() { func (c *WSClient) readRoutine() {
defer func() { defer func() {
if err := c.conn.Close(); err != nil { if err := c.conn.Close(); err != nil {
// panic(err) FIXME: this panic will trigger in tests
// ignore error; it will trigger in tests
// likely because it's closing and already closed connection
} }
c.wg.Done() c.wg.Done()
}() }()


+ 1
- 1
rpc/lib/server/handlers.go View File

@@ -720,7 +720,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
wm.logger.Info("New websocket connection", "remote", con.remoteAddr) wm.logger.Info("New websocket connection", "remote", con.remoteAddr)
_, err = con.Start() // Blocking _, err = con.Start() // Blocking
if err != nil { if err != nil {
panic(err)
wm.logger.Error("Error starting connection", "err", err)
} }
} }


+ 2
- 8
rpc/lib/server/http_server.go View File

@@ -56,10 +56,7 @@ func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RP
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpCode) w.WriteHeader(httpCode)
_, err = w.Write(jsonBytes)
if err != nil {
// ignore error
}
_, _ = w.Write(jsonBytes) // ignoring error
} }
func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
@@ -69,10 +66,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
} }
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200) w.WriteHeader(200)
_, err = w.Write(jsonBytes)
if err != nil {
// ignore error
}
_, _ = w.Write(jsonBytes) // ignoring error
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------


+ 1
- 4
types/part_set.go View File

@@ -34,10 +34,7 @@ func (part *Part) Hash() []byte {
return part.hash return part.hash
} else { } else {
hasher := ripemd160.New() hasher := ripemd160.New()
_, err := hasher.Write(part.Bytes)
if err != nil {
// ignore error
}
_, _ = hasher.Write(part.Bytes) // ignoring error
part.hash = hasher.Sum(nil) part.hash = hasher.Sum(nil)
return part.hash return part.hash
} }


Loading…
Cancel
Save