
core: apply megacheck vet tool (unused, gosimple, staticcheck)

pull/522/head
zramsay · 7 years ago · commit cf31f8d06f
24 changed files with 67 additions and 177 deletions
  1. blockchain/pool.go  +7 -8
  2. blockchain/reactor.go  +5 -5
  3. consensus/byzantine_test.go  +1 -0
  4. consensus/common_test.go  +0 -21
  5. consensus/height_vote_set_test.go  +3 -0
  6. consensus/reactor.go  +1 -1
  7. consensus/replay.go  +4 -1
  8. consensus/replay_test.go  +0 -61
  9. consensus/state.go  +11 -14
  10. consensus/state_test.go  +14 -4
  11. p2p/addrbook.go  +0 -3
  12. p2p/connection.go  +4 -4
  13. p2p/listener.go  +1 -1
  14. p2p/netaddress.go  +0 -1
  15. p2p/pex_reactor.go  +0 -1
  16. p2p/secret_connection.go  +0 -14
  17. p2p/upnp/probe.go  +4 -5
  18. p2p/upnp/upnp.go  +5 -5
  19. rpc/test/client_test.go  +0 -2
  20. state/execution.go  +0 -12
  21. state/state.go  +0 -4
  22. state/txindex/kv/kv_test.go  +2 -2
  23. types/block.go  +1 -4
  24. types/validator_set.go  +4 -4
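
For orientation: megacheck bundles three checkers. `unused` reports dead code (the deleted helper functions, struct fields, and constants below), `gosimple` suggests mechanical rewrites (check IDs prefixed S), and `staticcheck` reports likely correctness problems (prefixed SA). A minimal sketch, not taken from this repository, of the kind of code each one flags:

package main

import (
	"fmt"
	"time"
)

// deadHelper is never called anywhere, so the `unused` checker reports it.
func deadHelper() int { return 42 }

func main() {
	start := time.Now()
	d := time.Now().Sub(start) // gosimple S1012: should use time.Since(start)
	d = time.Since(start)      // staticcheck SA4006: the value assigned above is never read
	fmt.Println(d)
}

Most of the hunks below are hand-applied fixes for exactly these kinds of findings.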

blockchain/pool.go  +7 -8

@ -142,7 +142,7 @@ func (pool *BlockPool) IsCaughtUp() bool {
maxPeerHeight = MaxInt(maxPeerHeight, peer.height)
}
isCaughtUp := (height > 0 || time.Now().Sub(pool.startTime) > 5*time.Second) && (maxPeerHeight == 0 || height >= maxPeerHeight)
isCaughtUp := (height > 0 || time.Since(pool.startTime) > 5*time.Second) && (maxPeerHeight == 0 || height >= maxPeerHeight)
pool.Logger.Info(Fmt("IsCaughtUp: %v", isCaughtUp), "height", height, "maxPeerHeight", maxPeerHeight)
return isCaughtUp
}
@ -212,9 +212,9 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int
pool.numPending--
peer := pool.peers[peerID]
peer.decrPending(blockSize)
} else {
// Bad peer?
}
} // else {
// Bad peer?
// }
}
// Sets the peer's alleged blockchain height.
@ -261,7 +261,6 @@ func (pool *BlockPool) pickIncrAvailablePeer(minHeight int) *bpPeer {
if peer.didTimeout {
pool.removePeer(peer.id)
continue
} else {
}
if peer.numPending >= maxPendingRequestsPerPeer {
continue
@ -303,7 +302,8 @@ func (pool *BlockPool) sendTimeout(peerID string) {
pool.timeoutsCh <- peerID
}
func (pool *BlockPool) debug() string {
// unused by tendermint; left for debugging purposes
/*func (pool *BlockPool) debug() string {
pool.mtx.Lock() // Lock
defer pool.mtx.Unlock()
@ -317,7 +317,7 @@ func (pool *BlockPool) debug() string {
}
}
return str
}
}*/
//-------------------------------------
@ -326,7 +326,6 @@ type bpPeer struct {
id string
recvMonitor *flow.Monitor
mtx sync.Mutex
height int
numPending int32
timeout *time.Timer
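
The `else { // Bad peer? }` branch above loses its braces but keeps its comment: staticcheck flags an empty if or else body (likely its SA9003 check), so the commit moves the note outside the branch rather than deleting it. A minimal sketch of the same rewrite, with invented names:

package main

import "fmt"

func main() {
	pending, ok := 3, true
	if ok {
		pending--
	} // else {
	//	Bad peer? -- nothing to do here yet
	// }
	fmt.Println(pending)
}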


blockchain/reactor.go  +5 -5

@ -19,7 +19,6 @@ const (
BlockchainChannel = byte(0x40)
defaultChannelCapacity = 100
defaultSleepIntervalMS = 500
trySyncIntervalMS = 100
// stop syncing when last block's time is
// within this much of the system time.
@ -49,7 +48,6 @@ type BlockchainReactor struct {
fastSync bool
requestsCh chan BlockRequest
timeoutsCh chan string
lastBlock *types.Block
evsw types.EventSwitch
}
@ -134,6 +132,8 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
// [zr] note: the `megacheck` tool (go vet on steroids)
// really dislikes the logic in this switch
switch msg := msg.(type) {
case *bcBlockRequestMessage:
// Got a request for a block. Respond with block if we have it.
@ -194,10 +194,10 @@ FOR_LOOP:
if peer != nil {
bcR.Switch.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
}
case _ = <-statusUpdateTicker.C:
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest()
case _ = <-switchToConsensusTicker.C:
case <-switchToConsensusTicker.C:
height, numPending, _ := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters),
@ -211,7 +211,7 @@ FOR_LOOP:
break FOR_LOOP
}
case _ = <-trySyncTicker.C: // chan time
case <-trySyncTicker.C: // chan time
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
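
`case _ = <-statusUpdateTicker.C:` and a bare `case <-statusUpdateTicker.C:` behave identically; when the received value is discarded anyway, gosimple asks for the shorter form, which is all the three select-case changes above do. A small self-contained sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	statusUpdateTicker := time.NewTicker(10 * time.Millisecond)
	defer statusUpdateTicker.Stop()
	select {
	case <-statusUpdateTicker.C: // was: case _ = <-statusUpdateTicker.C:
		fmt.Println("time to broadcast a status update")
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for the ticker")
	}
}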


consensus/byzantine_test.go  +1 -0

@ -77,6 +77,7 @@ func TestByzantine(t *testing.T) {
var conRI p2p.Reactor
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}


consensus/common_test.go  +0 -21

@ -222,17 +222,6 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
return voteCh
}
func readVotes(ch chan interface{}, reads int) chan struct{} {
wg := make(chan struct{})
go func() {
for i := 0; i < reads; i++ {
<-ch // read the precommit event
}
close(wg)
}()
return wg
}
//-------------------------------------------------------------------------------
// consensus states
@ -274,16 +263,6 @@ func loadPrivValidator(config *cfg.Config) *types.PrivValidator {
return privValidator
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
cs.SetLogger(log.TestingLogger())
return cs
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())


consensus/height_vote_set_test.go  +3 -0

@ -30,6 +30,9 @@ func TestPeerCatchupRounds(t *testing.T) {
vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
added, err = hvs.AddVote(vote1001_0, "peer1")
if err != nil {
t.Error("AddVote error", err)
}
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}


consensus/reactor.go  +1 -1

@ -351,7 +351,7 @@ func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *
Height: rs.Height,
Round: rs.Round,
Step: rs.Step,
SecondsSinceStartTime: int(time.Now().Sub(rs.StartTime).Seconds()),
SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.Round(),
}
if rs.Step == RoundStepCommit {


consensus/replay.go  +4 -1

@ -104,6 +104,9 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
// NOTE: This is just a sanity check. As far as we know things work fine without it,
// and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT).
gr, found, err := cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight))
if err != nil {
return err
}
if gr != nil {
gr.Close()
}
@ -132,7 +135,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
if !found {
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
gr, _, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
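
Two small fixes above: the error from the first wal.group.Search is now returned instead of silently dropped, and the fallback Search discards the `found` result it does not use rather than reassigning the variable (the "assigned but never read" pattern staticcheck's SA4006 warns about). A minimal sketch of the same shape, with invented names in place of the wal API:

package main

import (
	"errors"
	"fmt"
)

// search stands in for any lookup that can fail; it is not the real wal API.
func search(key string) (value string, found bool, err error) {
	if key == "" {
		return "", false, errors.New("empty key")
	}
	return "hit:" + key, true, nil
}

func main() {
	// Check the error before trusting the other return values.
	v, found, err := search("ENDHEIGHT")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	if !found {
		// Discard results that nothing reads afterwards instead of
		// overwriting a variable that is never looked at again.
		v, _, err = search("HEIGHT")
		if err != nil {
			fmt.Println("fallback failed:", err)
			return
		}
	}
	fmt.Println(v)
}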


consensus/replay_test.go  +0 -61

@ -557,67 +557,6 @@ func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
return nil, nil
}
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(chainID string, privVal *types.PrivValidator, height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privVal.Address,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privVal.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, chainID string, nBlocks int, privVal *types.PrivValidator, proxyApp proxy.AppConns, state *sm.State) (blockchain []*types.Block, commits []*types.Commit) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(chainID, privVal, i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
blockchain = append(blockchain, block)
commits = append(commits, lastCommit)
}
return blockchain, commits
}
// fresh state and mock store
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()


consensus/state.go  +11 -14

@ -648,10 +648,10 @@ func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
case *VoteMessage:
// attempt to add the vote and dupeout the validator if its a duplicate signature
// if the vote gives us a 2/3-any or 2/3-one, we transition
err := cs.tryAddVote(msg.Vote, peerKey)
if err == ErrAddingVote {
// TODO: punish peer
}
_ = cs.tryAddVote(msg.Vote, peerKey)
//if err == ErrAddingVote {
// TODO: punish peer
//}
// NOTE: the vote is broadcast to peers by the reactor listening
// for vote events
@ -897,10 +897,10 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// fire event for how we got here
if cs.isProposalComplete() {
types.FireEventCompleteProposal(cs.evsw, cs.RoundStateEvent())
} else {
// we received +2/3 prevotes for a future round
// TODO: catchup event?
}
} // else {
// we received +2/3 prevotes for a future round
// TODO: catchup event?
//}
cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
@ -939,7 +939,6 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
// NOTE: the proposal signature is validated when it is received,
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
// Enter: any +2/3 prevotes at next round.
@ -1059,7 +1058,6 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
}
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
}
// Enter: any +2/3 precommits for next round.
@ -1124,9 +1122,9 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
} else {
// We just need to keep waiting.
}
} // else {
// We just need to keep waiting.
//}
}
}
@ -1250,7 +1248,6 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// * cs.Height has been increment to height+1
// * cs.Step is now RoundStepNewHeight
// * cs.StartTime is set to when we will start round0.
return
}
//-----------------------------------------------------------------------------
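
Two patterns recur in this file: the error from tryAddVote is now explicitly discarded with `_ =` because the `if err == ErrAddingVote` branch it fed is commented out, and the bare `return` statements at the end of functions with no result values are deleted (gosimple's "omit redundant control flow" suggestion, S1023). A tiny sketch of the latter, with an invented function:

package main

import "fmt"

// logPrevote is a stand-in, not the consensus code.
func logPrevote(height, round int) {
	fmt.Printf("prevote at %d/%d\n", height, round)
	// A trailing bare `return` here would be redundant: execution falls
	// off the end of the function anyway, which is what S1023 points out.
}

func main() {
	logPrevote(10, 0)
}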


consensus/state_test.go  +14 -4

@ -523,7 +523,10 @@ func TestLockPOLRelock(t *testing.T) {
<-voteCh // prevote
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// prevotes
<-voteCh
<-voteCh
<-voteCh
<-voteCh // our precommit
// the proposed block should now be locked and our precommit added
@ -532,7 +535,10 @@ func TestLockPOLRelock(t *testing.T) {
// add precommits from the rest
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // precommits
// precommits
<-voteCh
<-voteCh
<-voteCh
// before we timeout to the new round set the new proposal
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
@ -570,7 +576,10 @@ func TestLockPOLRelock(t *testing.T) {
// now lets add prevotes from everyone else for the new block
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// prevotes
<-voteCh
<-voteCh
<-voteCh
// now either we go to PrevoteWait or Precommit
select {
@ -585,7 +594,8 @@ func TestLockPOLRelock(t *testing.T) {
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
_, _ = <-voteCh, <-voteCh
<-voteCh
<-voteCh
be := <-newBlockCh
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader)
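
The tuple assignments `_, _, _ = <-voteCh, <-voteCh, <-voteCh` become individual receives: assigning received values to the blank identifier is exactly what gosimple wants dropped, and a bare `<-ch` statement consumes from the channel just the same. A sketch, not the test itself:

package main

import "fmt"

func main() {
	voteCh := make(chan int, 3)
	voteCh <- 1
	voteCh <- 2
	voteCh <- 3

	// was: _, _, _ = <-voteCh, <-voteCh, <-voteCh
	<-voteCh
	<-voteCh
	<-voteCh
	fmt.Println(len(voteCh), "values left")
}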


p2p/addrbook.go  +0 -3

@ -68,9 +68,6 @@ const (
// max addresses returned by GetSelection
// NOTE: this must match "maxPexMessageSize"
maxGetSelection = 250
// current version of the on-disk format.
serializationVersion = 1
)
const (


p2p/connection.go  +4 -4

@ -366,9 +366,9 @@ func (c *MConnection) sendMsgPacket() bool {
// Nothing to send?
if leastChannel == nil {
return true
} else {
// c.Logger.Info("Found a msgPacket to send")
}
} // else {
// c.Logger.Info("Found a msgPacket to send")
//}
// Make & send a msgPacket from this channel
n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
@ -468,7 +468,7 @@ FOR_LOOP:
// Cleanup
close(c.pong)
for _ = range c.pong {
for range c.pong {
// Drain
}
}
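
Draining a channel with `for _ = range c.pong` works, but the blank identifier is noise; `for range c.pong` is the form gosimple suggests, used here and in p2p/listener.go below. A small sketch of the drain idiom:

package main

import "fmt"

func main() {
	pong := make(chan struct{}, 2)
	pong <- struct{}{}
	pong <- struct{}{}
	close(pong)

	// was: for _ = range pong { ... }
	for range pong {
		// Drain until the closed channel is empty.
	}
	fmt.Println("drained")
}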


p2p/listener.go  +1 -1

@ -135,7 +135,7 @@ func (l *DefaultListener) listenRoutine() {
// Cleanup
close(l.connections)
for _ = range l.connections {
for range l.connections {
// Drain
}
}


p2p/netaddress.go  +0 -1

@ -174,7 +174,6 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
Ipv6_weak
Ipv4
Ipv6_strong
Private
)
if !na.Routable() {
return Unreachable


p2p/pex_reactor.go  +0 -1

@ -44,7 +44,6 @@ const (
type PEXReactor struct {
BaseReactor
sw *Switch
book *AddrBook
ensurePeersPeriod time.Duration


p2p/secret_connection.go  +0 -14

@ -293,10 +293,6 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signa
return &recvMsg, nil
}
func verifyChallengeSignature(challenge *[32]byte, remPubKey crypto.PubKeyEd25519, remSignature crypto.SignatureEd25519) bool {
return remPubKey.VerifyBytes(challenge[:], remSignature.Wrap())
}
//--------------------------------------------------------------------------------
// sha256
@ -319,16 +315,6 @@ func hash24(input []byte) (res *[24]byte) {
return
}
// ripemd160
func hash20(input []byte) (res *[20]byte) {
hasher := ripemd160.New()
hasher.Write(input) // does not error
resSlice := hasher.Sum(nil)
res = new([20]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)


p2p/upnp/probe.go  +4 -5

@ -1,7 +1,6 @@
package upnp
import (
"errors"
"fmt"
"net"
"time"
@ -18,26 +17,26 @@ type UPNPCapabilities struct {
func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
nat, err := Discover()
if err != nil {
return nil, nil, nil, errors.New(fmt.Sprintf("NAT upnp could not be discovered: %v", err))
return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err)
}
logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress()
if err != nil {
return nat, nil, nil, errors.New(fmt.Sprintf("External address error: %v", err))
return nat, nil, nil, fmt.Errorf("External address error: %v", err)
}
logger.Info(cmn.Fmt("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Port mapping error: %v", err))
return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err)
}
logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Error establishing listener: %v", err))
return nat, nil, ext, fmt.Errorf("Error establishing listener: %v", err)
}
return nat, listener, ext, nil
}
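
`errors.New(fmt.Sprintf(...))` and `fmt.Errorf(...)` build the same error; gosimple (S1028) prefers the direct form, which also lets the now-unused `errors` import above be dropped. A sketch with an invented validation:

package main

import "fmt"

func checkPort(port int) error {
	if port <= 0 || port > 65535 {
		// was: return errors.New(fmt.Sprintf("invalid port: %v", port))
		return fmt.Errorf("invalid port: %v", port)
	}
	return nil
}

func main() {
	if err := checkPort(-1); err != nil {
		fmt.Println(err)
	}
}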


p2p/upnp/upnp.go  +5 -5

@ -65,14 +65,14 @@ func Discover() (nat NAT, err error) {
return
}
var n int
n, _, err = socket.ReadFromUDP(answerBytes)
_, _, err = socket.ReadFromUDP(answerBytes)
for {
n, _, err = socket.ReadFromUDP(answerBytes)
if err != nil {
break
}
answer := string(answerBytes[0:n])
if strings.Index(answer, st) < 0 {
if !strings.Contains(answer, st) {
continue
}
// HTTP header field names are case-insensitive.
@ -153,7 +153,7 @@ type Root struct {
func getChildDevice(d *Device, deviceType string) *Device {
dl := d.DeviceList.Device
for i := 0; i < len(dl); i++ {
if strings.Index(dl[i].DeviceType, deviceType) >= 0 {
if strings.Contains(dl[i].DeviceType, deviceType) {
return &dl[i]
}
}
@ -163,7 +163,7 @@ func getChildDevice(d *Device, deviceType string) *Device {
func getChildService(d *Device, serviceType string) *UPNPService {
sl := d.ServiceList.Service
for i := 0; i < len(sl); i++ {
if strings.Index(sl[i].ServiceType, serviceType) >= 0 {
if strings.Contains(sl[i].ServiceType, serviceType) {
return &sl[i]
}
}
@ -211,7 +211,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
return
}
a := &root.Device
if strings.Index(a.DeviceType, "InternetGatewayDevice:1") < 0 {
if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") {
err = errors.New("No InternetGatewayDevice")
return
}
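
`strings.Index(s, sub) >= 0` (or its negation with `< 0`) is just a substring test, which `strings.Contains` states directly; that is all the four gosimple-driven replacements above change. A sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	deviceType := "urn:schemas-upnp-org:device:InternetGatewayDevice:1"

	// was: if strings.Index(deviceType, "InternetGatewayDevice:1") < 0 { ... }
	if !strings.Contains(deviceType, "InternetGatewayDevice:1") {
		fmt.Println("No InternetGatewayDevice")
		return
	}
	fmt.Println("found an InternetGatewayDevice")
}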


rpc/test/client_test.go  +0 -2

@ -238,8 +238,6 @@ func testTx(t *testing.T, client rpc.HTTPClient, withIndexer bool) {
//--------------------------------------------------------------------------------
// Test the websocket service
var wsTyp = "JSONRPC"
// make a simple connection to the server
func TestWSConnect(t *testing.T) {
wsc := GetWSClient()


state/execution.go  +0 -12

@ -155,18 +155,6 @@ func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.
return nil
}
// return a bit array of validators that signed the last commit
// NOTE: assumes commits have already been authenticated
func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray {
signed := cmn.NewBitArray(len(block.LastCommit.Precommits))
for i, precommit := range block.LastCommit.Precommits {
if precommit != nil {
signed.SetIndex(i, true) // val_.LastCommitHeight = block.Height - 1
}
}
return signed
}
//-----------------------------------------------------
// Validate block


state/state.go  +0 -4

@ -46,10 +46,6 @@ type State struct {
TxIndexer txindex.TxIndexer `json:"-"` // Transaction indexer.
// Intermediate results from processing
// Persisted separately from the state
abciResponses *ABCIResponses
logger log.Logger
}


state/txindex/kv/kv_test.go  +2 -2

@ -8,9 +8,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
db "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tmlibs/db"
)
func TestTxIndex(t *testing.T) {
@ -52,7 +52,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
err = indexer.AddBatch(batch)
_ = indexer.AddBatch(batch)
}
}


types/block.go  +1 -4

@ -282,10 +282,7 @@ func (commit *Commit) GetByIndex(index int) *Vote {
}
func (commit *Commit) IsCommit() bool {
if len(commit.Precommits) == 0 {
return false
}
return true
return len(commit.Precommits) != 0
}
func (commit *Commit) ValidateBasic() error {


types/validator_set.go  +4 -4

@ -85,14 +85,14 @@ func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0 {
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
} else {
return 0, nil
@ -159,7 +159,7 @@ func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
} else if bytes.Compare(valSet.Validators[idx].Address, val.Address) == 0 {
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
@ -191,7 +191,7 @@ func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || bytes.Compare(valSet.Validators[idx].Address, address) != 0 {
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
} else {
removedVal := valSet.Validators[idx]

