
remove or comment out unused code

pull/3261/head
Anton Kaliaev 6 years ago
commit ffd3bf8448
No known key found for this signature in database. GPG Key ID: 7B6881D965918214
10 changed files with 179 additions and 201 deletions
1. +0 -2    .golangci.yml
2. +17 -17  blockchain/pool.go
3. +29 -29  consensus/common_test.go
4. +0 -4    consensus/state_test.go
5. +11 -11  crypto/merkle/proof_test.go
6. +78 -79  lite/proxy/query_test.go
7. +0 -9    p2p/conn/secret_connection_test.go
8. +3 -5    p2p/switch.go
9. +41 -41  p2p/trust/metric_test.go
10. +0 -4   state/state_test.go

+0 -2  .golangci.yml

@@ -26,8 +26,6 @@ linters:
    - stylecheck
    - deadcode
    - prealloc
-   - unused
-   - gosimple
# linters-settings:
#   govet:


+17 -17  blockchain/pool.go

@@ -363,23 +363,23 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
    pool.errorsCh <- peerError{err, peerID}
}

-// unused by tendermint; left for debugging purposes
-func (pool *BlockPool) debug() string {
-    pool.mtx.Lock()
-    defer pool.mtx.Unlock()
-    str := ""
-    nextHeight := pool.height + pool.requestersLen()
-    for h := pool.height; h < nextHeight; h++ {
-        if pool.requesters[h] == nil {
-            str += fmt.Sprintf("H(%v):X ", h)
-        } else {
-            str += fmt.Sprintf("H(%v):", h)
-            str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
-        }
-    }
-    return str
-}
+// for debugging purposes
+// func (pool *BlockPool) debug() string {
+//     pool.mtx.Lock()
+//     defer pool.mtx.Unlock()
+//     str := ""
+//     nextHeight := pool.height + pool.requestersLen()
+//     for h := pool.height; h < nextHeight; h++ {
+//         if pool.requesters[h] == nil {
+//             str += fmt.Sprintf("H(%v):X ", h)
+//         } else {
+//             str += fmt.Sprintf("H(%v):", h)
+//             str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
+//         }
+//     }
+//     return str
+// }

//-------------------------------------


+29 -29  consensus/common_test.go

@@ -378,35 +378,35 @@ func ensureNewEvent(
    }
}

-func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
-    ensureNewEvent(
-        stepCh,
-        height,
-        round,
-        ensureTimeout,
-        "Timeout expired while waiting for NewStep event")
-}

-func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
-    select {
-    case <-time.After(ensureTimeout):
-        break
-    case v := <-voteCh:
-        edv, ok := v.(types.EventDataVote)
-        if !ok {
-            panic(fmt.Sprintf("expected a *types.Vote, "+
-                "got %v. wrong subscription channel?",
-                reflect.TypeOf(v)))
-        }
-        vote := edv.Vote
-        if vote.Height != height {
-            panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
-        }
-        if vote.Round != round {
-            panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
-        }
-    }
-}
+// func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
+//     ensureNewEvent(
+//         stepCh,
+//         height,
+//         round,
+//         ensureTimeout,
+//         "Timeout expired while waiting for NewStep event")
+// }

+// func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
+//     select {
+//     case <-time.After(ensureTimeout):
+//         break
+//     case v := <-voteCh:
+//         edv, ok := v.(types.EventDataVote)
+//         if !ok {
+//             panic(fmt.Sprintf("expected a *types.Vote, "+
+//                 "got %v. wrong subscription channel?",
+//                 reflect.TypeOf(v)))
+//         }
+//         vote := edv.Vote
+//         if vote.Height != height {
+//             panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
+//         }
+//         if vote.Round != round {
+//             panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
+//         }
+//     }
+// }

func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
    select {
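Aside: the commented-out ensureNewVote above follows the usual Go pattern for event-driven tests: receive from a subscription channel inside a select with a deadline, type-assert the payload, and panic with a descriptive message when the wrong event type arrives. Below is a self-contained sketch of that pattern under assumed names; eventVote and ensureVote are illustrative stand-ins, not Tendermint APIs.

package main

import (
    "fmt"
    "time"
)

// eventVote is a hypothetical payload type, standing in for types.EventDataVote.
type eventVote struct {
    Height int64
    Round  int
}

// ensureVote waits up to timeout for an eventVote on ch and checks its fields,
// mirroring the shape of the commented-out ensureNewVote helper above.
func ensureVote(ch <-chan interface{}, height int64, round int, timeout time.Duration) {
    select {
    case <-time.After(timeout):
        // A timeout is tolerated here, as in the original helper.
    case v := <-ch:
        ev, ok := v.(eventVote)
        if !ok {
            panic(fmt.Sprintf("expected eventVote, got %T; wrong subscription channel?", v))
        }
        if ev.Height != height || ev.Round != round {
            panic(fmt.Sprintf("expected height/round %d/%d, got %d/%d",
                height, round, ev.Height, ev.Round))
        }
    }
}

func main() {
    ch := make(chan interface{}, 1)
    ch <- eventVote{Height: 1, Round: 0}
    ensureVote(ch, 1, 0, 100*time.Millisecond)
    fmt.Println("vote event matched")
}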


+0 -4  consensus/state_test.go

@@ -22,10 +22,6 @@ func init() {
    config = ResetConfig("consensus_state_test")
}

-func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
-    return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
-}

/*
ProposeSuite


+11 -11  crypto/merkle/proof_test.go

@@ -26,17 +26,17 @@ func NewDominoOp(key, input, output string) DominoOp {
    }
}

-func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
-    if pop.Type != ProofOpDomino {
-        panic("unexpected proof op type")
-    }
-    var op DominoOp // a bit strange as we'll discard this, but it works.
-    err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
-    if err != nil {
-        return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
-    }
-    return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
-}
+// func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
+//     if pop.Type != ProofOpDomino {
+//         panic("unexpected proof op type")
+//     }
+//     var op DominoOp // a bit strange as we'll discard this, but it works.
+//     err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
+//     if err != nil {
+//         return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
+//     }
+//     return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
+// }

func (dop DominoOp) ProofOp() ProofOp {
    bz := amino.MustMarshalBinaryLengthPrefixed(dop)


+78 -79  lite/proxy/query_test.go

@@ -4,7 +4,6 @@ import (
    "fmt"
    "os"
    "testing"
-   "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -21,7 +20,7 @@ import (
var node *nm.Node
var chainID = "tendermint_test" // TODO use from config.
-var waitForEventTimeout = 5 * time.Second
+// var waitForEventTimeout = 5 * time.Second

// TODO fix tests!!

@@ -42,83 +41,83 @@ func kvstoreTx(k, v []byte) []byte {
// TODO: enable it after general proof format has been adapted
// in abci/examples/kvstore.go
-func _TestAppProofs(t *testing.T) {
-    assert, require := assert.New(t), require.New(t)
-    prt := defaultProofRuntime()
-    cl := client.NewLocal(node)
-    client.WaitForHeight(cl, 1, nil)
-    // This sets up our trust on the node based on some past point.
-    source := certclient.NewProvider(chainID, cl)
-    seed, err := source.LatestFullCommit(chainID, 1, 1)
-    require.NoError(err, "%#v", err)
-    cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
-    // Wait for tx confirmation.
-    done := make(chan int64)
-    go func() {
-        evtTyp := types.EventTx
-        _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
-        require.Nil(err, "%#v", err)
-        close(done)
-    }()
-    // Submit a transaction.
-    k := []byte("my-key")
-    v := []byte("my-value")
-    tx := kvstoreTx(k, v)
-    br, err := cl.BroadcastTxCommit(tx)
-    require.NoError(err, "%#v", err)
-    require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
-    require.EqualValues(0, br.DeliverTx.Code)
-    brh := br.Height
-    // Fetch latest after tx commit.
-    <-done
-    latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
-    require.NoError(err, "%#v", err)
-    rootHash := latest.SignedHeader.AppHash
-    if rootHash == nil {
-        // Fetch one block later, AppHash hasn't been committed yet.
-        // TODO find a way to avoid doing this.
-        client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
-        latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
-        require.NoError(err, "%#v", err)
-        rootHash = latest.SignedHeader.AppHash
-    }
-    require.NotNil(rootHash)
-    // verify a query before the tx block has no data (and valid non-exist proof)
-    bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
-    require.NoError(err, "%#v", err)
-    // require.NotNil(proof)
-    // TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
-    // (currently there's a race condition)
-    // and ensure that proof proves absence of k.
-    require.Nil(bs)
-    // but given that block it is good
-    bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
-    require.NoError(err, "%#v", err)
-    require.NotNil(proof)
-    require.Equal(height, brh)
-    assert.EqualValues(v, bs)
-    err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
-    assert.NoError(err, "%#v", err)
-    // Test non-existing key.
-    missing := []byte("my-missing-key")
-    bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
-    require.NoError(err)
-    require.Nil(bs)
-    require.NotNil(proof)
-    err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
-    assert.NoError(err, "%#v", err)
-    err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
-    assert.Error(err, "%#v", err)
-}
+// func TestAppProofs(t *testing.T) {
+//     assert, require := assert.New(t), require.New(t)
+//     prt := defaultProofRuntime()
+//     cl := client.NewLocal(node)
+//     client.WaitForHeight(cl, 1, nil)
+//     // This sets up our trust on the node based on some past point.
+//     source := certclient.NewProvider(chainID, cl)
+//     seed, err := source.LatestFullCommit(chainID, 1, 1)
+//     require.NoError(err, "%#v", err)
+//     cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
+//     // Wait for tx confirmation.
+//     done := make(chan int64)
+//     go func() {
+//         evtTyp := types.EventTx
+//         _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
+//         require.Nil(err, "%#v", err)
+//         close(done)
+//     }()
+//     // Submit a transaction.
+//     k := []byte("my-key")
+//     v := []byte("my-value")
+//     tx := kvstoreTx(k, v)
+//     br, err := cl.BroadcastTxCommit(tx)
+//     require.NoError(err, "%#v", err)
+//     require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
+//     require.EqualValues(0, br.DeliverTx.Code)
+//     brh := br.Height
+//     // Fetch latest after tx commit.
+//     <-done
+//     latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
+//     require.NoError(err, "%#v", err)
+//     rootHash := latest.SignedHeader.AppHash
+//     if rootHash == nil {
+//         // Fetch one block later, AppHash hasn't been committed yet.
+//         // TODO find a way to avoid doing this.
+//         client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
+//         latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
+//         require.NoError(err, "%#v", err)
+//         rootHash = latest.SignedHeader.AppHash
+//     }
+//     require.NotNil(rootHash)
+//     // verify a query before the tx block has no data (and valid non-exist proof)
+//     bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
+//     require.NoError(err, "%#v", err)
+//     // require.NotNil(proof)
+//     // TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
+//     // (currently there's a race condition)
+//     // and ensure that proof proves absence of k.
+//     require.Nil(bs)
+//     // but given that block it is good
+//     bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
+//     require.NoError(err, "%#v", err)
+//     require.NotNil(proof)
+//     require.Equal(height, brh)
+//     assert.EqualValues(v, bs)
+//     err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
+//     assert.NoError(err, "%#v", err)
+//     // Test non-existing key.
+//     missing := []byte("my-missing-key")
+//     bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
+//     require.NoError(err)
+//     require.Nil(bs)
+//     require.NotNil(proof)
+//     err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
+//     assert.NoError(err, "%#v", err)
+//     err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
+//     assert.Error(err, "%#v", err)
+// }

func TestTxProofs(t *testing.T) {
    assert, require := assert.New(t), require.New(t)


+0 -9  p2p/conn/secret_connection_test.go

@@ -398,12 +398,3 @@ func BenchmarkSecretConnection(b *testing.B) {
    }
    //barSecConn.Close() race condition
}

-func fingerprint(bz []byte) []byte {
-    const fbsize = 40
-    if len(bz) < fbsize {
-        return bz
-    } else {
-        return bz[:fbsize]
-    }
-}

+3 -5  p2p/switch.go

@@ -480,14 +480,12 @@ func (sw *Switch) acceptRoutine() {
            metrics: sw.metrics,
        })
        if err != nil {
-           switch err.(type) {
+           switch err := err.(type) {
            case ErrRejected:
-               rErr := err.(ErrRejected)
-               if rErr.IsSelf() {
+               if err.IsSelf() {
                    // Remove the given address from the address book and add to our addresses
                    // to avoid dialing in the future.
-                   addr := rErr.Addr()
+                   addr := err.Addr()
                    sw.addrBook.RemoveAddress(&addr)
                    sw.addrBook.AddOurAddress(&addr)
                }
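Aside: the switch.go change above relies on the fact that `switch err := err.(type)` rebinds the variable to the concrete type inside each case, so the explicit `err.(ErrRejected)` assertion becomes redundant. A minimal standalone sketch of the idiom; rejectedErr and dial are hypothetical stand-ins, not Tendermint code.

package main

import "fmt"

// rejectedErr is a hypothetical error type standing in for p2p.ErrRejected.
type rejectedErr struct{ self bool }

func (e rejectedErr) Error() string { return "connection rejected" }
func (e rejectedErr) IsSelf() bool  { return e.self }

// dial is a hypothetical operation that can fail with rejectedErr.
func dial() error { return rejectedErr{self: true} }

func main() {
    if err := dial(); err != nil {
        // Binding the value in the type switch gives each case the concrete
        // type directly, so no separate err.(rejectedErr) assertion is needed.
        switch err := err.(type) {
        case rejectedErr:
            if err.IsSelf() {
                fmt.Println("we dialed ourselves; drop the address")
            }
        default:
            fmt.Println("unexpected error:", err)
        }
    }
}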


+41 -41  p2p/trust/metric_test.go

@@ -65,44 +65,44 @@ func TestTrustMetricCopyNilPointer(t *testing.T) {
}

// XXX: This test fails non-deterministically
-func _TestTrustMetricStopPause(t *testing.T) {
-    // The TestTicker will provide manual control over
-    // the passing of time within the metric
-    tt := NewTestTicker()
-    tm := NewMetric()
-    tm.SetTicker(tt)
-    tm.Start()
-    // Allow some time intervals to pass and pause
-    tt.NextTick()
-    tt.NextTick()
-    tm.Pause()
-    // could be 1 or 2 because Pause and NextTick race
-    first := tm.Copy().numIntervals
-    // Allow more time to pass and check the intervals are unchanged
-    tt.NextTick()
-    tt.NextTick()
-    assert.Equal(t, first, tm.Copy().numIntervals)
-    // Get the trust metric activated again
-    tm.GoodEvents(5)
-    // Allow some time intervals to pass and stop
-    tt.NextTick()
-    tt.NextTick()
-    tm.Stop()
-    tm.Wait()
-    second := tm.Copy().numIntervals
-    // Allow more intervals to pass while the metric is stopped
-    // and check that the number of intervals match
-    tm.NextTimeInterval()
-    tm.NextTimeInterval()
-    // XXX: fails non-deterministically:
-    // expected 5, got 6
-    assert.Equal(t, second+2, tm.Copy().numIntervals)
-    if first > second {
-        t.Fatalf("numIntervals should always increase or stay the same over time")
-    }
-}
+// func _TestTrustMetricStopPause(t *testing.T) {
+//     // The TestTicker will provide manual control over
+//     // the passing of time within the metric
+//     tt := NewTestTicker()
+//     tm := NewMetric()
+//     tm.SetTicker(tt)
+//     tm.Start()
+//     // Allow some time intervals to pass and pause
+//     tt.NextTick()
+//     tt.NextTick()
+//     tm.Pause()
+//     // could be 1 or 2 because Pause and NextTick race
+//     first := tm.Copy().numIntervals
+//     // Allow more time to pass and check the intervals are unchanged
+//     tt.NextTick()
+//     tt.NextTick()
+//     assert.Equal(t, first, tm.Copy().numIntervals)
+//     // Get the trust metric activated again
+//     tm.GoodEvents(5)
+//     // Allow some time intervals to pass and stop
+//     tt.NextTick()
+//     tt.NextTick()
+//     tm.Stop()
+//     tm.Wait()
+//     second := tm.Copy().numIntervals
+//     // Allow more intervals to pass while the metric is stopped
+//     // and check that the number of intervals match
+//     tm.NextTimeInterval()
+//     tm.NextTimeInterval()
+//     // XXX: fails non-deterministically:
+//     // expected 5, got 6
+//     assert.Equal(t, second+2, tm.Copy().numIntervals)
+//     if first > second {
+//         t.Fatalf("numIntervals should always increase or stay the same over time")
+//     }
+// }

+0 -4  state/state_test.go

@@ -938,10 +938,6 @@ func makeParams(blockBytes, blockGas, evidenceAge int64) types.ConsensusParams {
    }
}

-func pk() []byte {
-    return ed25519.GenPrivKey().PubKey().Bytes()
-}

func TestApplyUpdates(t *testing.T) {
    initParams := makeParams(1, 2, 3)
