Browse Source

errcheck; sort some stuff out

pull/703/head
Zach Ramsay 7 years ago
committed by Ethan Buchman
parent
commit
d7cb291fb2
26 changed files with 61 additions and 154 deletions
  1. +1
    -1
      Makefile
  2. +1
    -1
      benchmarks/map_test.go
  3. +1
    -1
      blockchain/reactor.go
  4. +12
    -12
      blockchain/store.go
  5. +5
    -12
      cmd/tendermint/commands/reset_priv_validator.go
  6. +1
    -4
      config/toml_test.go
  7. +1
    -1
      consensus/byzantine_test.go
  8. +1
    -6
      consensus/replay.go
  9. +1
    -5
      consensus/replay_file.go
  10. +1
    -6
      consensus/replay_test.go
  11. +1
    -1
      consensus/state.go
  12. +1
    -1
      node/id.go
  13. +8
    -40
      p2p/connection_test.go
  14. +1
    -1
      p2p/fuzz.go
  15. +5
    -25
      p2p/pex_reactor_test.go
  16. +5
    -5
      p2p/upnp/upnp.go
  17. +2
    -8
      p2p/util.go
  18. +2
    -2
      rpc/client/mock/abci.go
  19. +1
    -1
      rpc/grpc/client_server.go
  20. +2
    -2
      rpc/lib/client/http_client.go
  21. +2
    -2
      rpc/lib/client/ws_client.go
  22. +1
    -5
      rpc/lib/client/ws_client_test.go
  23. +1
    -4
      rpc/lib/server/handlers.go
  24. +2
    -2
      rpc/lib/server/http_server.go
  25. +1
    -5
      state/txindex/kv/kv_test.go
  26. +1
    -1
      types/part_set.go

+ 1
- 1
Makefile View File

@ -89,7 +89,6 @@ metalinter_test: ensure_tools
gometalinter --vendor --deadline=600s --disable-all \
--enable=deadcode \
--enable=gas \
--enable=goimports \
--enable=gosimple \
--enable=gotype \
--enable=ineffassign \
@ -104,6 +103,7 @@ metalinter_test: ensure_tools
#--enable=errcheck \
#--enable=goconst \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=interfacer \
#--enable=megacheck \


+ 1
- 1
benchmarks/map_test.go View File

@ -1,4 +1,4 @@
package benchmarks // nolint (goimports)
package benchmarks // nolint: goimports
import (
"testing"


+ 1
- 1
blockchain/reactor.go View File

@ -228,7 +228,7 @@ FOR_LOOP:
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint (errcheck)
go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()


+ 12
- 12
blockchain/store.go View File

@ -9,7 +9,7 @@ import (
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
)
@ -67,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
@ -76,7 +76,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
if err != nil {
PanicCrisis(Fmt("Error reading block: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
}
return block
}
@ -90,7 +90,7 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
}
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
if err != nil {
PanicCrisis(Fmt("Error reading block part: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
}
return part
}
@ -104,7 +104,7 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
return blockMeta
}
@ -120,7 +120,7 @@ func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
PanicCrisis(Fmt("Error reading commit: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
@ -135,7 +135,7 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
PanicCrisis(Fmt("Error reading commit: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
@ -148,10 +148,10 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
height := block.Height
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
if !blockParts.IsComplete() {
PanicSanity(Fmt("BlockStore can only save complete block part sets"))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
@ -187,7 +187,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := wire.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@ -222,7 +222,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj)
if err != nil {
PanicSanity(Fmt("Could not marshal state bytes: %v", err))
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}
@ -237,7 +237,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj)
if err != nil {
PanicCrisis(Fmt("Could not unmarshal bytes: %X", bytes))
cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
}
return bsj
}

+ 5
- 12
cmd/tendermint/commands/reset_priv_validator.go View File

@ -25,10 +25,13 @@ var ResetPrivValidatorCmd = &cobra.Command{
}
// ResetAll removes the privValidator files.
// Exported so other CLI tools can use it
// Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger)
os.RemoveAll(dbDir)
if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err)
return
}
logger.Info("Removed all data", "dir", dbDir)
}
@ -44,16 +47,6 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorFS(config.PrivValidatorFile(), logger)
}
// Exported so other CLI tools can use it
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorLocal(privValFile, logger)
if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err)
return
}
logger.Info("Removed all data", "dir", dbDir)
}
func resetPrivValidatorFS(privValFile string, logger log.Logger) {
// Get PrivValidator
if _, err := os.Stat(privValFile); err == nil {


+ 1
- 4
config/toml_test.go View File

@ -24,10 +24,7 @@ func TestEnsureRoot(t *testing.T) {
// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
defer func() {
err := os.RemoveAll(tmpDir)
require.Nil(err)
}()
defer os.RemoveAll(tmpDir) // nolint: errcheck
// create root dir
EnsureRoot(tmpDir)


+ 1
- 1
consensus/byzantine_test.go View File

@ -70,7 +70,7 @@ func TestByzantine(t *testing.T) {
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
var conRI p2p.Reactor // nolint (gotype)
var conRI p2p.Reactor // nolint: gotype
conRI = conR
if i == 0 {


+ 1
- 6
consensus/replay.go View File

@ -115,12 +115,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
} else if err != nil {
return err
} else {
defer func() {
if err := gr.Close(); err != nil {
cs.Logger.Error("Error closing wal Search", "err", err)
return
}
}()
defer gr.Close() // nolint: errcheck
}
if !found {
return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))


+ 1
- 5
consensus/replay_file.go View File

@ -65,11 +65,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
defer func() {
if err := pb.fp.Close(); err != nil {
cs.Logger.Error("Error closing new playback", "err", err)
}
}()
defer pb.fp.Close() // nolint: errcheck
var nextN int // apply N msgs in a row
var msg *TimedWALMessage


+ 1
- 6
consensus/replay_test.go View File

@ -490,12 +490,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
}
defer func() {
if err := gr.Close(); err != nil {
wal.Logger.Error("Error closing wal Search", "err", err)
return
}
}()
defer gr.Close() // nolint: errcheck
// log.Notice("Build a blockchain by reading from the WAL")


+ 1
- 1
consensus/state.go View File

@ -372,7 +372,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint (gotype)
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}


+ 1
- 1
node/id.go View File

@ -1,4 +1,4 @@
package node // nolint (goimports)
package node
import (
"time"


+ 8
- 40
p2p/connection_test.go View File

@ -32,16 +32,8 @@ func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
defer func() {
if err := server.Close(); err != nil {
t.Error(err)
}
}()
defer func() {
if err := client.Close(); err != nil {
t.Error(err)
}
}()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
_, err := mconn.Start()
@ -73,16 +65,8 @@ func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
defer func() {
if err := server.Close(); err != nil {
t.Error(err)
}
}()
defer func() {
if err := client.Close(); err != nil {
t.Error(err)
}
}()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
@ -119,16 +103,8 @@ func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
defer func() {
if err := server.Close(); err != nil {
t.Error(err)
}
}()
defer func() {
if err := client.Close(); err != nil {
t.Error(err)
}
}()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
_, err := mconn.Start()
@ -144,16 +120,8 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
defer func() {
if err := server.Close(); err != nil {
t.Error(err)
}
}()
defer func() {
if err := client.Close(); err != nil {
t.Error(err)
}
}()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})


+ 1
- 1
p2p/fuzz.go View File

@ -143,7 +143,7 @@ func (fc *FuzzedConnection) fuzz() bool {
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
// XXX: can't this fail because machine precision?
// XXX: do we need an error?
fc.Close() // nolint (errcheck)
fc.Close() // nolint: errcheck
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
time.Sleep(fc.randomDuration())


+ 5
- 25
p2p/pex_reactor_test.go View File

@ -20,11 +20,7 @@ func TestPEXReactorBasic(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
@ -40,11 +36,7 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
@ -77,11 +69,7 @@ func TestPEXReactorRunning(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", false)
book.SetLogger(log.TestingLogger())
@ -151,11 +139,7 @@ func TestPEXReactorReceive(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", false)
book.SetLogger(log.TestingLogger())
@ -180,11 +164,7 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())


+ 5
- 5
p2p/upnp/upnp.go View File

@ -40,7 +40,7 @@ func Discover() (nat NAT, err error) {
return
}
socket := conn.(*net.UDPConn)
defer socket.Close() // nolint (errcheck)
defer socket.Close() // nolint: errcheck
if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil {
return nil, err
@ -197,7 +197,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
if err != nil {
return
}
defer r.Body.Close() // nolint (errcheck)
defer r.Body.Close() // nolint: errcheck
if r.StatusCode >= 400 {
err = errors.New(string(r.StatusCode))
@ -296,7 +296,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
var response *http.Response
response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
if response != nil {
defer response.Body.Close() // nolint (errcheck)
defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return
@ -345,7 +345,7 @@ func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int
var response *http.Response
response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
if response != nil {
defer response.Body.Close() // nolint (errcheck)
defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return
@ -371,7 +371,7 @@ func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort
var response *http.Response
response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
if response != nil {
defer response.Body.Close() // nolint (errcheck)
defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return


+ 2
- 8
p2p/util.go View File

@ -7,15 +7,9 @@ import (
// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
func doubleSha256(b []byte) []byte {
hasher := sha256.New()
_, err := hasher.Write(b)
if err != nil {
panic(err)
}
_, _ = hasher.Write(b) // error ignored
sum := hasher.Sum(nil)
hasher.Reset()
_, err = hasher.Write(sum)
if err != nil {
panic(err)
}
_, _ = hasher.Write(sum) // error ignored
return hasher.Sum(nil)
}

+ 2
- 2
rpc/client/mock/abci.go View File

@ -49,7 +49,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }() // nolint (errcheck)
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
@ -58,7 +58,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }() // nolint (errcheck)
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}


+ 1
- 1
rpc/grpc/client_server.go View File

@ -25,7 +25,7 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
grpcServer := grpc.NewServer()
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
go grpcServer.Serve(ln) // nolint (errcheck)
go grpcServer.Serve(ln) // nolint: errcheck
return ln, nil
}


+ 2
- 2
rpc/lib/client/http_client.go View File

@ -93,7 +93,7 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
if err != nil {
return nil, err
}
defer httpResponse.Body.Close() // nolint (errcheck)
defer httpResponse.Body.Close() // nolint: errcheck
responseBytes, err := ioutil.ReadAll(httpResponse.Body)
if err != nil {
@ -129,7 +129,7 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in
if err != nil {
return nil, err
}
defer resp.Body.Close() // nolint (errcheck)
defer resp.Body.Close() // nolint: errcheck
responseBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {


+ 2
- 2
rpc/lib/client/ws_client.go View File

@ -354,7 +354,7 @@ func (c *WSClient) writeRoutine() {
ticker.Stop()
if err := c.conn.Close(); err != nil {
// ignore error; it will trigger in tests
// likely because it's closing and already closed connection
// likely because it's closing an already closed connection
}
c.wg.Done()
}()
@ -406,7 +406,7 @@ func (c *WSClient) readRoutine() {
defer func() {
if err := c.conn.Close(); err != nil {
// ignore error; it will trigger in tests
// likely because it's closing and already closed connection
// likely because it's closing an already closed connection
}
c.wg.Done()
}()


+ 1
- 5
rpc/lib/client/ws_client_test.go View File

@ -34,11 +34,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err != nil {
panic(err)
}
defer func() {
if err := conn.Close(); err != nil {
panic(err)
}
}()
defer conn.Close() // nolint: errcheck
for {
messageType, _, err := conn.ReadMessage()
if err != nil {


+ 1
- 4
rpc/lib/server/handlers.go View File

@ -782,8 +782,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st
buf.WriteString("</body></html>")
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(200)
_, err := w.Write(buf.Bytes())
if err != nil {
// ignore error
}
_, _ = w.Write(buf.Bytes()) // error ignored
}

+ 2
- 2
rpc/lib/server/http_server.go View File

@ -56,7 +56,7 @@ func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RP
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpCode)
_, _ = w.Write(jsonBytes) // ignoring error
_, _ = w.Write(jsonBytes) // error ignored
}
func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
@ -66,7 +66,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
_, _ = w.Write(jsonBytes) // ignoring error
_, _ = w.Write(jsonBytes) // error ignored
}
//-----------------------------------------------------------------------------


+ 1
- 5
state/txindex/kv/kv_test.go View File

@ -40,11 +40,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
if err != nil {
b.Fatal(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
b.Fatal(err)
}
}()
defer os.RemoveAll(dir) // nolint: errcheck
store := db.NewDB("tx_index", "leveldb", dir)
indexer := &TxIndex{store: store}


+ 1
- 1
types/part_set.go View File

@ -34,7 +34,7 @@ func (part *Part) Hash() []byte {
return part.hash
} else {
hasher := ripemd160.New()
_, _ = hasher.Write(part.Bytes) // ignoring error
_, _ = hasher.Write(part.Bytes) // error ignored
part.hash = hasher.Sum(nil)
return part.hash
}


Loading…
Cancel
Save