
lint: Enable Golint (#4212)

* Fix many golint errors

* Fix golint errors in the 'lite' package

* Don't export Pool.store

* Fix typo

* Revert unwanted changes

* Fix errors in counter package

* Fix linter errors in kvstore package

* Fix linter error in example package

* Fix error in tests package

* Fix linter errors in v2 package

* Fix linter errors in consensus package

* Fix linter errors in evidence package

* Fix linter error in fail package

* Fix linter errors in query package

* Fix linter errors in core package

* Fix linter errors in node package

* Fix linter errors in mempool package

* Fix linter error in conn package

* Fix linter errors in pex package

* Rename PEXReactor export to Reactor

* Fix linter errors in trust package

* Fix linter errors in upnp package

* Fix linter errors in p2p package

* Fix linter errors in proxy package

* Fix linter errors in mock_test package

* Fix linter error in client_test package

* Fix linter errors in coretypes package

* Fix linter errors in coregrpc package

* Fix linter errors in rpcserver package

* Fix linter errors in rpctypes package

* Fix linter errors in rpctest package

* Fix linter error in json2wal script

* Fix linter error in wal2json script

* Fix linter errors in kv package

* Fix linter error in state package

* Fix linter error in grpc_client

* Fix linter errors in types package

* Fix linter error in version package

* Fix remaining errors

* Address review comments

* Fix broken tests

* Reconcile package coregrpc

* Fix golangci bot error

* Fix new golint errors

* Fix broken reference

* Enable golint linter

* Minor changes to bring golint into line

* Fix failing test

* Fix pex reactor naming

* Address PR comments
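
Most of the bullets above are the same few golint rules applied package by package. A condensed sketch of the recurring fixes (identifiers are illustrative; the quoted strings are golint's actual messages):

package counter

import "errors"

// Renamed from CounterApplication: "type name will be used as
// counter.CounterApplication by other packages, and that stutters;
// consider calling this Application".
type Application struct {
	txCount int
}

func (app *Application) Deliver(ok bool) error {
	// Was "app.txCount += 1": "should replace app.txCount += 1 with app.txCount++".
	app.txCount++
	if ok {
		return nil
	}
	// The bare return below replaces an else branch: "if block ends with
	// a return statement, so drop this else and outdent its block".
	// The message was "Empty tx": "error strings should not be capitalized".
	return errors.New("empty tx")
}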
Marko, 5 years ago, committed by GitHub
commit 3e2751d274
93 changed files with 796 additions and 793 deletions
  1. +8 -23 .golangci.yml
  2. +9 -9 abci/cmd/abci-cli/abci-cli.go
  3. +9 -9 abci/example/counter/counter.go
  4. +1 -1 abci/example/example_test.go
  5. +22 -20 abci/example/kvstore/kvstore.go
  6. +3 -3 abci/example/kvstore/kvstore_test.go
  7. +2 -2 abci/example/kvstore/persistent_kvstore.go
  8. +0 -1 abci/server/server.go
  9. +1 -1 abci/tests/client_server_test.go
  10. +2 -2 behaviour/reporter.go
  11. +2 -2 behaviour/reporter_test.go
  12. +1 -1 blockchain/v2/routine_test.go
  13. +1 -1 cmd/tendermint/commands/run_node.go
  14. +9 -8 consensus/byzantine_test.go
  15. +1 -1 consensus/codec.go
  16. +38 -31 consensus/common_test.go
  17. +6 -6 consensus/mempool_test.go
  18. +46 -45 consensus/reactor.go
  19. +15 -15 consensus/reactor_test.go
  20. +3 -3 consensus/replay.go
  21. +7 -7 consensus/replay_file.go
  22. +15 -13 consensus/replay_test.go
  23. +87 -83 consensus/state.go
  24. +30 -30 consensus/state_test.go
  25. +2 -1 consensus/wal.go
  26. +1 -1 consensus/wal_generator.go
  27. +4 -4 crypto/ed25519/ed25519.go
  28. +2 -2 crypto/merkle/simple_tree.go
  29. +1 -1 evidence/codec.go
  30. +29 -29 evidence/pool.go
  31. +2 -2 evidence/pool_test.go
  32. +30 -30 evidence/reactor.go
  33. +14 -14 evidence/reactor_test.go
  34. +29 -29 evidence/store.go
  35. +6 -6 evidence/store_test.go
  36. +1 -1 libs/events/events_test.go
  37. +9 -8 libs/fail/fail.go
  38. +6 -6 libs/log/tracing_logger_test.go
  39. +1 -1 lite/client/provider_test.go
  40. +21 -19 lite/dbprovider.go
  41. +5 -5 lite/dynamic_verifier_test.go
  42. +1 -1 lite/helpers.go
  43. +5 -5 lite/multiprovider.go
  44. +16 -13 lite/proxy/query.go
  45. +1 -1 lite/proxy/query_test.go
  46. +1 -1 lite2/provider/http/http_test.go
  47. +2 -2 mempool/bench_test.go
  48. +3 -3 mempool/cache_test.go
  49. +13 -13 mempool/clist_mempool.go
  50. +10 -10 mempool/clist_mempool_test.go
  51. +1 -1 mempool/codec.go
  52. +6 -6 mempool/reactor.go
  53. +1 -1 mempool/reactor_test.go
  54. +5 -5 node/id.go
  55. +27 -27 node/node.go
  56. +2 -2 node/node_test.go
  57. +2 -2 p2p/mock/peer.go
  58. +6 -6 p2p/node_info.go
  59. +1 -1 p2p/node_info_test.go
  60. +1 -1 p2p/peer_test.go
  61. +1 -1 p2p/pex/codec.go
  62. +40 -40 p2p/pex/pex_reactor.go
  63. +24 -24 p2p/pex/pex_reactor_test.go
  64. +1 -1 p2p/switch_test.go
  65. +1 -1 p2p/test_util.go
  66. +5 -5 p2p/trust/config.go
  67. +29 -29 p2p/trust/metric.go
  68. +2 -2 p2p/trust/metric_test.go
  69. +19 -19 p2p/trust/store.go
  70. +1 -1 p2p/trust/store_test.go
  71. +2 -2 p2p/upnp/probe.go
  72. +4 -4 p2p/upnp/upnp.go
  73. +3 -3 proxy/app_conn_test.go
  74. +3 -3 proxy/client.go
  75. +2 -2 rpc/client/examples_test.go
  76. +1 -1 rpc/client/mock/abci_test.go
  77. +4 -4 rpc/client/rpc_test.go
  78. +2 -2 rpc/core/pipe.go
  79. +1 -1 rpc/grpc/grpc_test.go
  80. +1 -1 rpc/lib/client/integration_test.go
  81. +3 -3 rpc/test/helpers.go
  82. +1 -1 scripts/json2wal/main.go
  83. +1 -1 scripts/wal2json/main.go
  84. +1 -1 state/execution_test.go
  85. +0 -6 state/export_test.go
  86. +1 -1 state/state_test.go
  87. +20 -20 state/txindex/kv/kv.go
  88. +3 -3 test/app/grpc_client.go
  89. +2 -1 tools/tm-monitor/monitor/node.go
  90. +13 -13 types/evidence.go
  91. +15 -15 types/vote_set.go
  92. +4 -4 types/vote_set_test.go
  93. +1 -1 version/version.go

+8 -23 .golangci.yml

@ -15,7 +15,7 @@ linters:
# - godox
- gofmt
- goimports
# - golint
- golint
- gosec
- gosimple
- govet
@ -43,38 +43,23 @@ linters:
issues:
exclude-rules:
- linters:
- lll
source: "https://"
- linters:
- lll
source: "https://"
linters-settings:
dogsled:
max-blank-identifiers: 3
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
# govet:
# check-shadowing: true
# golint:
# min-confidence: 0
# govet:
# check-shadowing: true
golint:
min-confidence: 0
# gocyclo:
# min-complexity: 10
# maligned:
# suggest-new: true
# dupl:
# threshold: 100
# depguard:
# list-type: blacklist
# packages:
# # logging is allowed only by logutils.Log, logrus
# # is allowed to use only in logutils package
# - github.com/sirupsen/logrus
# misspell:
# locale: US
# lll:
# line-length: 140
# goimports:
# local-prefixes: github.com/golangci/golangci-lint
# gocritic:
# enabled-tags:
# - performance


+9 -9 abci/cmd/abci-cli/abci-cli.go

@ -310,14 +310,14 @@ func persistentArgs(line []byte) []string {
func compose(fs []func() error) error {
if len(fs) == 0 {
return nil
} else {
err := fs[0]()
if err == nil {
return compose(fs[1:])
} else {
return err
}
}
err := fs[0]()
if err == nil {
return compose(fs[1:])
}
return err
}
func cmdTest(cmd *cobra.Command, args []string) error {
@ -626,7 +626,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewCounterApplication(flagSerial)
app := counter.NewApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
@ -655,7 +655,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewKVStoreApplication()
app = kvstore.NewApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
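
The compose hunk at the top of this file is golint's else rule in action ("if block ends with a return statement, so drop this else and outdent its block"). Reassembled from the hunk, the function after the change reads:

func compose(fs []func() error) error {
	if len(fs) == 0 {
		return nil
	}
	// Run the first function; recurse over the rest only on success.
	err := fs[0]()
	if err == nil {
		return compose(fs[1:])
	}
	return err
}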


+9 -9 abci/example/counter/counter.go

@ -8,7 +8,7 @@ import (
"github.com/tendermint/tendermint/abci/types"
)
type CounterApplication struct {
type Application struct {
types.BaseApplication
hashCount int
@ -16,15 +16,15 @@ type CounterApplication struct {
serial bool
}
func NewCounterApplication(serial bool) *CounterApplication {
return &CounterApplication{serial: serial}
func NewApplication(serial bool) *Application {
return &Application{serial: serial}
}
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
key, value := req.Key, req.Value
if key == "serial" && value == "on" {
app.serial = true
@ -42,7 +42,7 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
return types.ResponseSetOption{}
}
func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseDeliverTx{
@ -62,7 +62,7 @@ func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.Respo
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseCheckTx{
@ -81,7 +81,7 @@ func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseC
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
func (app *Application) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
@ -91,7 +91,7 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
return types.ResponseCommit{Data: hash}
}
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}


+1 -1 abci/example/example_test.go

@ -25,7 +25,7 @@ import (
func TestKVStore(t *testing.T) {
fmt.Println("### Testing KVStore")
testStream(t, kvstore.NewKVStoreApplication())
testStream(t, kvstore.NewApplication())
}
func TestBaseApp(t *testing.T) {


+22 -20 abci/example/kvstore/kvstore.go

@ -54,20 +54,20 @@ func prefixKey(key []byte) []byte {
//---------------------------------------------------
var _ types.Application = (*KVStoreApplication)(nil)
var _ types.Application = (*Application)(nil)
type KVStoreApplication struct {
type Application struct {
types.BaseApplication
state State
}
func NewKVStoreApplication() *KVStoreApplication {
func NewApplication() *Application {
state := loadState(dbm.NewMemDB())
return &KVStoreApplication{state: state}
return &Application{state: state}
}
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
@ -76,7 +76,7 @@ func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.Respon
}
// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
@ -86,7 +86,7 @@ func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Respo
}
app.state.db.Set(prefixKey(key), value)
app.state.Size += 1
app.state.Size++
events := []types.Event{
{
@ -101,22 +101,22 @@ func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Respo
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *KVStoreApplication) Commit() types.ResponseCommit {
func (app *Application) Commit() types.ResponseCommit {
// Using a memdb - just return the big endian size of the db
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
app.state.AppHash = appHash
app.state.Height += 1
app.state.Height++
saveState(app.state)
return types.ResponseCommit{Data: appHash}
}
// Returns an associated value or nil if missing.
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Index = -1 // TODO make Proof return index
@ -127,16 +127,18 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type
} else {
resQuery.Log = "does not exist"
}
return
}
resQuery.Key = reqQuery.Data
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Key = reqQuery.Data
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
resQuery.Log = "does not exist"
}
return
}
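
Reassembled from the hunk above, Query after the change: the else wrapper around the non-Prove path is dropped and its body outdented, since the Prove branch already ends in a return (proof branch elided here, as in the hunk):

func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
	if reqQuery.Prove {
		// ... proof path unchanged, ending in an early return
		return
	}
	resQuery.Key = reqQuery.Data
	value := app.state.db.Get(prefixKey(reqQuery.Data))
	resQuery.Value = value
	if value != nil {
		resQuery.Log = "exists"
	} else {
		resQuery.Log = "does not exist"
	}
	return
}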

+3 -3 abci/example/kvstore/kvstore_test.go

@ -50,7 +50,7 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri
}
func TestKVStoreKV(t *testing.T) {
kvstore := NewKVStoreApplication()
kvstore := NewApplication()
key := testKey
value := key
tx := []byte(key)
@ -262,7 +262,7 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, c
func TestClientServer(t *testing.T) {
// set up socket app
kvstore := NewKVStoreApplication()
kvstore := NewApplication()
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
require.Nil(t, err)
defer server.Stop()
@ -271,7 +271,7 @@ func TestClientServer(t *testing.T) {
runClientTests(t, client)
// set up grpc app
kvstore = NewKVStoreApplication()
kvstore = NewApplication()
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
require.Nil(t, err)
defer gserver.Stop()


+2 -2 abci/example/kvstore/persistent_kvstore.go

@ -24,7 +24,7 @@ const (
var _ types.Application = (*PersistentKVStoreApplication)(nil)
type PersistentKVStoreApplication struct {
app *KVStoreApplication
app *Application
// validator set
ValUpdates []types.ValidatorUpdate
@ -44,7 +44,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
state := loadState(db)
return &PersistentKVStoreApplication{
app: &KVStoreApplication{state: state},
app: &Application{state: state},
valAddrToPubKeyMap: make(map[string]types.PubKey),
logger: log.NewNopLogger(),
}


+0 -1 abci/server/server.go

@ -6,7 +6,6 @@ It contains two server implementation:
* socket server
*/
package server
import (
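
The single deleted line in this file is a blank line between the package doc comment and the package clause. golint wants the two adjacent; with the blank line in place it reports "package comment is detached; there should be no blank lines between it and the package statement". Minimal illustration (comment text abbreviated):

/*
Package server ...
*/
package server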


+1 -1 abci/tests/client_server_test.go

@ -13,7 +13,7 @@ import (
func TestClientServerNoAddrPrefix(t *testing.T) {
addr := "localhost:26658"
transport := "socket"
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
server, err := abciserver.NewServer(addr, transport, app)
assert.NoError(t, err, "expected no error on NewServer")


+2 -2 behaviour/reporter.go

@ -80,7 +80,7 @@ func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
copy(result, items)
return result
} else {
return []PeerBehaviour{}
}
return []PeerBehaviour{}
}

+2 -2 behaviour/reporter_test.go

@ -43,11 +43,11 @@ func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
bHistogram := map[bh.PeerBehaviour]int{}
for _, behaviour := range a {
aHistogram[behaviour] += 1
aHistogram[behaviour]++
}
for _, behaviour := range b {
bHistogram[behaviour] += 1
bHistogram[behaviour]++
}
if len(aHistogram) != len(bHistogram) {


+1 -1 blockchain/v2/routine_test.go

@ -77,7 +77,7 @@ func genStatefulHandler(maxCount int) handleFunc {
counter := 0
return func(event Event) (Event, error) {
if _, ok := event.(eventA); ok {
counter += 1
counter++
if counter >= maxCount {
return noOp, finalCount{counter}
}


+1 -1 cmd/tendermint/commands/run_node.go

@ -69,7 +69,7 @@ func AddNodeFlags(cmd *cobra.Command) {
// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
cmd := &cobra.Command{
Use: "node",
Short: "Run the tendermint node",


+9 -8 consensus/byzantine_test.go

@ -70,7 +70,7 @@ func TestByzantine(t *testing.T) {
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
conR := NewReactor(css[i], true) // so we don't start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
@ -90,7 +90,7 @@ func TestByzantine(t *testing.T) {
if rr, ok := r.(*ByzantineReactor); ok {
rr.reactor.Switch.Stop()
} else {
r.(*ConsensusReactor).Switch.Stop()
r.(*Reactor).Switch.Stop()
}
}
}()
@ -110,7 +110,7 @@ func TestByzantine(t *testing.T) {
// start the non-byz state machines.
// note these must be started before the byz
for i := 1; i < N; i++ {
cr := reactors[i].(*ConsensusReactor)
cr := reactors[i].(*Reactor)
cr.SwitchToConsensus(cr.conS.GetState(), 0)
}
@ -171,7 +171,7 @@ func TestByzantine(t *testing.T) {
//-------------------------------
// byzantine consensus functions
func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *ConsensusState, sw *p2p.Switch) {
func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *State, sw *p2p.Switch) {
// byzantine user should create two proposals and try to split the vote.
// Avoid sending on internalMsgQueue and running consensus state.
@ -209,11 +209,12 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
func sendProposalAndParts(
height int64,
round int,
cs *ConsensusState,
cs *State,
peer p2p.Peer,
proposal *types.Proposal,
blockHash []byte,
parts *types.PartSet) {
parts *types.PartSet,
) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
@ -244,10 +245,10 @@ func sendProposalAndParts(
type ByzantineReactor struct {
cmn.Service
reactor *ConsensusReactor
reactor *Reactor
}
func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
return &ByzantineReactor{
Service: conR,
reactor: conR,


+1 -1 consensus/codec.go

@ -8,7 +8,7 @@ import (
var cdc = amino.NewCodec()
func init() {
RegisterConsensusMessages(cdc)
RegisterMessages(cdc)
RegisterWALMessages(cdc)
types.RegisterBlockAmino(cdc)
}

+38 -31 consensus/common_test.go

@ -149,17 +149,18 @@ func (vss ValidatorStubsByAddress) Swap(i, j int) {
//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
func startTestRound(cs *ConsensusState, height int64, round int) {
func startTestRound(cs *State, height int64, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
}
// Create proposal block from cs1 but sign it with vs.
func decideProposal(
cs1 *ConsensusState,
cs1 *State,
vs *validatorStub,
height int64,
round int) (proposal *types.Proposal, block *types.Block) {
round int,
) (proposal *types.Proposal, block *types.Block) {
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
validRound := cs1.ValidRound
@ -178,23 +179,24 @@ func decideProposal(
return
}
func addVotes(to *ConsensusState, votes ...*types.Vote) {
func addVotes(to *State, votes ...*types.Vote) {
for _, vote := range votes {
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
}
}
func signAddVotes(
to *ConsensusState,
to *State,
voteType types.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub) {
vss ...*validatorStub,
) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, blockHash []byte) {
prevotes := cs.Votes.Prevotes(round)
address := privVal.GetPubKey().Address()
var vote *types.Vote
@ -212,7 +214,7 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
}
}
func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) {
votes := cs.LastCommit
address := privVal.GetPubKey().Address()
var vote *types.Vote
@ -226,12 +228,13 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
func validatePrecommit(
t *testing.T,
cs *ConsensusState,
cs *State,
thisRound,
lockRound int,
privVal *validatorStub,
votedBlockHash,
lockedBlockHash []byte) {
lockedBlockHash []byte,
) {
precommits := cs.Votes.Precommits(thisRound)
address := privVal.GetPubKey().Address()
var vote *types.Vote
@ -272,12 +275,13 @@ func validatePrecommit(
func validatePrevoteAndPrecommit(
t *testing.T,
cs *ConsensusState,
cs *State,
thisRound,
lockRound int,
privVal *validatorStub,
votedBlockHash,
lockedBlockHash []byte) {
lockedBlockHash []byte,
) {
// verify the prevote
validatePrevote(t, cs, thisRound, privVal, votedBlockHash)
// verify precommit
@ -286,7 +290,7 @@ func validatePrevoteAndPrecommit(
cs.mtx.Unlock()
}
func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
@ -307,26 +311,28 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
//-------------------------------------------------------------------------------
// consensus states
func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
config := cfg.ResetTestRoot("consensus_state_test")
return newConsensusStateWithConfig(config, state, pv, app)
return newStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(
func newStateWithConfig(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
app abci.Application) *ConsensusState {
app abci.Application,
) *State {
blockDB := dbm.NewMemDB()
return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}
func newConsensusStateWithConfigAndBlockStore(
func newStateWithConfigAndBlockStore(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
app abci.Application,
blockDB dbm.DB) *ConsensusState {
blockDB dbm.DB,
) *State {
// Get BlockStore
blockStore := store.NewBlockStore(blockDB)
@ -345,11 +351,11 @@ func newConsensusStateWithConfigAndBlockStore(
// mock the evidence pool
evpool := sm.MockEvidencePool{}
// Make ConsensusState
// Make State
stateDB := blockDB
sm.SaveState(stateDB, state) //for save height 1's validators info
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@ -369,13 +375,13 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV {
return privValidator
}
func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
func randState(nValidators int) (*State, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
vss := make([]*validatorStub, nValidators)
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
cs := newState(state, privVals[0], counter.NewApplication(true))
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i], i)
@ -615,9 +621,9 @@ func consensusLogger() log.Logger {
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
css := make([]*State, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
@ -633,7 +639,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
@ -650,9 +656,10 @@ func randConsensusNetWithPeers(
nPeers int,
testName string,
tickerFunc func() TimeoutTicker,
appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
appFunc func(string) abci.Application,
) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
css := make([]*State, nPeers)
logger := consensusLogger()
var peer0Config *cfg.Config
configRootDirs := make([]string, 0, nPeers)
@ -690,7 +697,7 @@ func randConsensusNetWithPeers(
app.InitChain(abci.RequestInitChain{Validators: vals})
//sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
css[i] = newStateWithConfig(thisConfig, state, privVal, app)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
@ -790,7 +797,7 @@ func (*mockTicker) SetLogger(log.Logger) {}
//------------------------------------
func newCounter() abci.Application {
return counter.NewCounterApplication(true)
return counter.NewApplication(true)
}
func newPersistentKVStore() abci.Application {


+6 -6 consensus/mempool_test.go

@ -27,7 +27,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@ -46,7 +46,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@ -62,7 +62,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@ -94,7 +94,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
ensureNewEventOnChannel(newBlockCh) // now we can commit the block
}
func deliverTxsRange(cs *ConsensusState, start, end int) {
func deliverTxsRange(cs *State, start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
@ -109,7 +109,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
sm.SaveState(blockDB, state)
newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader)
@ -132,7 +132,7 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
sm.SaveState(blockDB, state)
// increment the counter by 1


+46 -45 consensus/reactor.go

@ -33,11 +33,11 @@ const (
//-----------------------------------------------------------------------------
// ConsensusReactor defines a reactor for the consensus service.
type ConsensusReactor struct {
// Reactor defines a reactor for the consensus service.
type Reactor struct {
p2p.BaseReactor // BaseService + p2p.Switch
conS *ConsensusState
conS *State
mtx sync.RWMutex
fastSync bool
@ -46,18 +46,18 @@ type ConsensusReactor struct {
metrics *Metrics
}
type ReactorOption func(*ConsensusReactor)
type ReactorOption func(*Reactor)
// NewConsensusReactor returns a new ConsensusReactor with the given
// NewReactor returns a new Reactor with the given
// consensusState.
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options ...ReactorOption) *ConsensusReactor {
conR := &ConsensusReactor{
func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) *Reactor {
conR := &Reactor{
conS: consensusState,
fastSync: fastSync,
metrics: NopMetrics(),
}
conR.updateFastSyncingMetric()
conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
conR.BaseReactor = *p2p.NewBaseReactor("Reactor", conR)
for _, option := range options {
option(conR)
@ -68,8 +68,8 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in fast sync.
func (conR *ConsensusReactor) OnStart() error {
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
func (conR *Reactor) OnStart() error {
conR.Logger.Info("Reactor ", "fastSync", conR.FastSync())
// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()
@ -88,7 +88,7 @@ func (conR *ConsensusReactor) OnStart() error {
// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *ConsensusReactor) OnStop() {
func (conR *Reactor) OnStop() {
conR.unsubscribeFromBroadcastEvents()
conR.conS.Stop()
if !conR.FastSync() {
@ -98,7 +98,7 @@ func (conR *ConsensusReactor) OnStop() {
// SwitchToConsensus switches from fast_sync mode to consensus mode.
// It resets the state, turns off fast_sync, and starts the consensus state-machine
func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int) {
func (conR *Reactor) SwitchToConsensus(state sm.State, blocksSynced int) {
conR.Logger.Info("SwitchToConsensus")
conR.conS.reconstructLastCommit(state)
// NOTE: The line below causes broadcastNewRoundStepRoutine() to
@ -127,7 +127,7 @@ conR:
}
// GetChannels implements Reactor
func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
// TODO optimize
return []*p2p.ChannelDescriptor{
{
@ -162,7 +162,7 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// InitPeer implements Reactor by creating a state for the peer.
func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
return peer
@ -170,7 +170,7 @@ func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
func (conR *Reactor) AddPeer(peer p2p.Peer) {
if !conR.IsRunning() {
return
}
@ -192,7 +192,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
}
// RemovePeer is a noop.
func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
if !conR.IsRunning() {
return
}
@ -210,7 +210,7 @@ func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
@ -360,13 +360,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
// SetEventBus sets event bus.
func (conR *ConsensusReactor) SetEventBus(b *types.EventBus) {
func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.eventBus = b
conR.conS.SetEventBus(b)
}
// FastSync returns whether the consensus reactor is in fast-sync mode.
func (conR *ConsensusReactor) FastSync() bool {
func (conR *Reactor) FastSync() bool {
conR.mtx.RLock()
defer conR.mtx.RUnlock()
return conR.fastSync
@ -377,7 +377,7 @@ func (conR *ConsensusReactor) FastSync() bool {
// subscribeToBroadcastEvents subscribes for new round steps and votes
// using internal pubsub defined on state to broadcast
// them to peers upon receiving.
func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
func (conR *Reactor) subscribeToBroadcastEvents() {
const subscriber = "consensus-reactor"
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
func(data tmevents.EventData) {
@ -396,17 +396,17 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
}
func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
func (conR *Reactor) unsubscribeFromBroadcastEvents() {
const subscriber = "consensus-reactor"
conR.conS.evsw.RemoveListener(subscriber)
}
func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
func (conR *ConsensusReactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
csMsg := &NewValidBlockMessage{
Height: rs.Height,
Round: rs.Round,
@ -418,7 +418,7 @@ func (conR *ConsensusReactor) broadcastNewValidBlockMessage(rs *cstypes.RoundSta
}
// Broadcasts HasVoteMessage to peers that care.
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
@ -457,13 +457,13 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage)
return
}
func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.Peer) {
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg := makeRoundStepMessage(rs)
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@ -559,7 +559,7 @@ OUTER_LOOP:
}
}
func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
@ -602,7 +602,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}
func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
// Simple hack to throttle logs upon sleep.
@ -673,11 +673,12 @@ OUTER_LOOP:
}
}
func (conR *ConsensusReactor) gossipVotesForHeight(
func (conR *Reactor) gossipVotesForHeight(
logger log.Logger,
rs *cstypes.RoundState,
prs *cstypes.PeerRoundState,
ps *PeerState) bool {
ps *PeerState,
) bool {
// If there are lastCommits to send...
if prs.Step == cstypes.RoundStepNewHeight {
@ -733,7 +734,7 @@ func (conR *ConsensusReactor) gossipVotesForHeight(
// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@ -819,7 +820,7 @@ OUTER_LOOP:
}
}
func (conR *ConsensusReactor) peerStatsRoutine() {
func (conR *Reactor) peerStatsRoutine() {
for {
if !conR.IsRunning() {
conR.Logger.Info("Stopping peerStatsRoutine")
@ -859,16 +860,16 @@ func (conR *ConsensusReactor) peerStatsRoutine() {
}
}
// String returns a string representation of the ConsensusReactor.
// String returns a string representation of the Reactor.
// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
// TODO: improve!
func (conR *ConsensusReactor) String() string {
func (conR *Reactor) String() string {
// better not to access shared variables
return "ConsensusReactor" // conR.StringIndented("")
}
// StringIndented returns an indented string representation of the ConsensusReactor
func (conR *ConsensusReactor) StringIndented(indent string) string {
// StringIndented returns an indented string representation of the Reactor
func (conR *Reactor) StringIndented(indent string) string {
s := "ConsensusReactor{\n"
s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
for _, peer := range conR.Switch.Peers().List() {
@ -882,7 +883,7 @@ func (conR *ConsensusReactor) StringIndented(indent string) string {
return s
}
func (conR *ConsensusReactor) updateFastSyncingMetric() {
func (conR *Reactor) updateFastSyncingMetric() {
var fastSyncing float64
if conR.fastSync {
fastSyncing = 1
@ -894,7 +895,7 @@ func (conR *ConsensusReactor) updateFastSyncingMetric() {
// ReactorMetrics sets the metrics
func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(conR *ConsensusReactor) { conR.metrics = metrics }
return func(conR *Reactor) { conR.metrics = metrics }
}
//-----------------------------------------------------------------------------
@ -1381,13 +1382,13 @@ func (ps *PeerState) StringIndented(indent string) string {
//-----------------------------------------------------------------------------
// Messages
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor
type ConsensusMessage interface {
// Message is a message that can be sent and received on the Reactor
type Message interface {
ValidateBasic() error
}
func RegisterConsensusMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
func RegisterMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*Message)(nil), nil)
cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil)
cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
@ -1399,7 +1400,7 @@ func RegisterConsensusMessages(cdc *amino.Codec) {
cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
}
func decodeMsg(bz []byte) (msg ConsensusMessage, err error) {
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
@ -1471,7 +1472,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error {
return fmt.Errorf("wrong BlockPartsHeader: %v", err)
}
if m.BlockParts.Size() == 0 {
return errors.New("Empty BlockParts")
return errors.New("empty blockParts")
}
if m.BlockParts.Size() != m.BlockPartsHeader.Total {
return fmt.Errorf("blockParts bit array size %d not equal to BlockPartsHeader.Total %d",
@ -1479,7 +1480,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error {
m.BlockPartsHeader.Total)
}
if m.BlockParts.Size() > types.MaxBlockPartsCount {
return errors.Errorf("BlockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
return errors.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
}
return nil
}


+15 -15 consensus/reactor_test.go

@ -34,18 +34,18 @@ import (
//----------------------------------------------
// in-process testnets
func startConsensusNet(t *testing.T, css []*ConsensusState, n int) (
[]*ConsensusReactor,
func startConsensusNet(t *testing.T, css []*State, n int) (
[]*Reactor,
[]types.Subscription,
[]*types.EventBus,
) {
reactors := make([]*ConsensusReactor, n)
reactors := make([]*Reactor, n)
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, n)
for i := 0; i < n; i++ {
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
reactors[i].SetLogger(css[i].Logger)
// eventBus is already started with the cs
@ -78,10 +78,10 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, n int) (
return reactors, blocksSubs, eventBuses
}
func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
logger.Info("stopConsensusNet", "n", len(reactors))
for i, r := range reactors {
logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i)
logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
r.Switch.Stop()
}
for i, b := range eventBuses {
@ -119,7 +119,7 @@ func TestReactorWithEvidence(t *testing.T) {
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
css := make([]*State, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
@ -133,7 +133,7 @@ func TestReactorWithEvidence(t *testing.T) {
pv := privVals[i]
// duplicate code from:
// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)
blockDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(blockDB)
@ -156,9 +156,9 @@ func TestReactorWithEvidence(t *testing.T) {
addr := privVals[vIdx].GetPubKey().Address()
evpool := newMockEvidencePool(addr)
// Make ConsensusState
// Make State
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@ -521,7 +521,7 @@ func waitForAndValidateBlock(
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
css []*State,
txs ...[]byte,
) {
timeoutWaitGroup(t, n, func(j int) {
@ -543,7 +543,7 @@ func waitForAndValidateBlockWithTx(
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
css []*State,
txs ...[]byte,
) {
timeoutWaitGroup(t, n, func(j int) {
@ -578,7 +578,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(
n int,
updatedVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
css []*State,
) {
timeoutWaitGroup(t, n, func(j int) {
@ -621,7 +621,7 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
return nil
}
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) {
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
wg := new(sync.WaitGroup)
wg.Add(n)
for i := 0; i < n; i++ {
@ -712,7 +712,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) {
},
{
func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 0; msg.BlockParts = cmn.NewBitArray(0) },
"Empty BlockParts",
"empty blockParts",
},
{
func(msg *NewValidBlockMessage) { msg.BlockParts = cmn.NewBitArray(types.MaxBlockPartsCount + 1) },


+3 -3 consensus/replay.go

@ -42,7 +42,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli)
// Unmarshal and apply a single message to the consensus state as if it were
// received in receiveRoutine. Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running.
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
// Skip meta messages which exist for demarcating boundaries.
if _, ok := msg.Msg.(EndHeightMessage); ok {
return nil
@ -97,7 +97,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepSub typ
// Replay only those messages since the last block. `timeoutRoutine` should
// run concurrently to read off tickChan.
func (cs *ConsensusState) catchupReplay(csHeight int64) error {
func (cs *State) catchupReplay(csHeight int64) error {
// Set replayMode to true so we don't log signing errors.
cs.replayMode = true
@ -105,7 +105,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
// Ensure that #ENDHEIGHT for this height doesn't exist.
// NOTE: This is just a sanity check. As far as we know things work fine
// without it, and Handshake could reuse ConsensusState if it weren't for
// without it, and Handshake could reuse State if it weren't for
// this check (since we can crash after writing #ENDHEIGHT).
//
// Ignore data corruption errors since this is a sanity check.


+7 -7 consensus/replay_file.go

@ -40,7 +40,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console
}
// Replay msgs in file or start the console
func (cs *ConsensusState) ReplayFile(file string, console bool) error {
func (cs *State) ReplayFile(file string, console bool) error {
if cs.IsRunning() {
return errors.New("cs is already running, cannot replay")
@ -98,7 +98,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
// playback manager
type playback struct {
cs *ConsensusState
cs *State
fp *os.File
dec *WALDecoder
@ -109,7 +109,7 @@ type playback struct {
genesisState sm.State // so the replay session knows where to restart from
}
func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.State) *playback {
func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback {
return &playback{
cs: cs,
fp: fp,
@ -124,7 +124,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
pb.cs.Stop()
pb.cs.Wait()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()
@ -158,7 +158,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
return nil
}
func (cs *ConsensusState) startForReplay() {
func (cs *State) startForReplay() {
cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
@ -274,7 +274,7 @@ func (pb *playback) replayConsoleLoop() int {
//--------------------------------------------------------------------------------
// convenience for replay mode
func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState {
func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State {
dbType := dbm.BackendType(config.DBBackend)
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir())
@ -314,7 +314,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{}
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,
consensusState := NewState(csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)
consensusState.SetEventBus(eventBus)


+15 -13 consensus/replay_test.go

@ -63,17 +63,18 @@ func TestMain(m *testing.M) {
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
logger := log.TestingLogger()
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewKVStoreApplication(),
blockDB)
kvstore.NewApplication(),
blockDB,
)
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
@ -98,7 +99,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
}
}
func sendTxs(ctx context.Context, cs *ConsensusState) {
func sendTxs(ctx context.Context, cs *State) {
for i := 0; i < 256; i++ {
select {
case <-ctx.Done():
@ -115,14 +116,14 @@ func sendTxs(ctx context.Context, cs *ConsensusState) {
func TestWALCrash(t *testing.T) {
testCases := []struct {
name string
initFn func(dbm.DB, *ConsensusState, context.Context)
initFn func(dbm.DB, *State, context.Context)
heightToStop int64
}{
{"empty block",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {},
func(stateDB dbm.DB, cs *State, ctx context.Context) {},
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
func(stateDB dbm.DB, cs *State, ctx context.Context) {
go sendTxs(ctx, cs)
},
3},
@ -138,7 +139,7 @@ func TestWALCrash(t *testing.T) {
}
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
@ -153,12 +154,13 @@ LOOP:
stateDB := blockDB
state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewKVStoreApplication(),
blockDB)
kvstore.NewApplication(),
blockDB,
)
cs.SetLogger(logger)
// start sending transactions
@ -188,7 +190,7 @@ LOOP:
t.Logf("WAL panicked: %v", err)
// make sure we can make blocks after a crash
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
// stop consensus state and transactions sender (initFn)
cs.Stop()


+87 -83 consensus/state.go

@ -41,8 +41,8 @@ var (
// msgs from the reactor which may update the state
type msgInfo struct {
Msg ConsensusMessage `json:"msg"`
PeerID p2p.ID `json:"peer_key"`
Msg Message `json:"msg"`
PeerID p2p.ID `json:"peer_key"`
}
// internally generated messages which may update the state
@ -67,11 +67,11 @@ type evidencePool interface {
AddEvidence(types.Evidence) error
}
// ConsensusState handles execution of the consensus algorithm.
// State handles execution of the consensus algorithm.
// It processes votes and proposals, and upon reaching agreement,
// commits blocks to the chain and executes them against the application.
// The internal state machine receives input from peers, the internal validator, and from a timer.
type ConsensusState struct {
type State struct {
cmn.BaseService
// config details
@ -135,11 +135,11 @@ type ConsensusState struct {
metrics *Metrics
}
// StateOption sets an optional parameter on the ConsensusState.
type StateOption func(*ConsensusState)
// StateOption sets an optional parameter on the State.
type StateOption func(*State)
// NewConsensusState returns a new ConsensusState.
func NewConsensusState(
// NewState returns a new State.
func NewState(
config *cfg.ConsensusConfig,
state sm.State,
blockExec *sm.BlockExecutor,
@ -147,8 +147,8 @@ func NewConsensusState(
txNotifier txNotifier,
evpool evidencePool,
options ...StateOption,
) *ConsensusState {
cs := &ConsensusState{
) *State {
cs := &State{
config: config,
blockExec: blockExec,
blockStore: blockStore,
@ -174,7 +174,7 @@ func NewConsensusState(
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs)
cs.BaseService = *cmn.NewBaseService(nil, "State", cs)
for _, option := range options {
option(cs)
}
@ -185,30 +185,30 @@ func NewConsensusState(
// Public interface
// SetLogger implements Service.
func (cs *ConsensusState) SetLogger(l log.Logger) {
func (cs *State) SetLogger(l log.Logger) {
cs.BaseService.Logger = l
cs.timeoutTicker.SetLogger(l)
}
// SetEventBus sets event bus.
func (cs *ConsensusState) SetEventBus(b *types.EventBus) {
func (cs *State) SetEventBus(b *types.EventBus) {
cs.eventBus = b
cs.blockExec.SetEventBus(b)
}
// StateMetrics sets the metrics.
func StateMetrics(metrics *Metrics) StateOption {
return func(cs *ConsensusState) { cs.metrics = metrics }
return func(cs *State) { cs.metrics = metrics }
}
// String returns a string.
func (cs *ConsensusState) String() string {
func (cs *State) String() string {
// better not to access shared variables
return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
}
// GetState returns a copy of the chain state.
func (cs *ConsensusState) GetState() sm.State {
func (cs *State) GetState() sm.State {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.state.Copy()
@ -216,14 +216,14 @@ func (cs *ConsensusState) GetState() sm.State {
// GetLastHeight returns the last height committed.
// If there were no blocks, returns 0.
func (cs *ConsensusState) GetLastHeight() int64 {
func (cs *State) GetLastHeight() int64 {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.RoundState.Height - 1
}
// GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
func (cs *State) GetRoundState() *cstypes.RoundState {
cs.mtx.RLock()
rs := cs.RoundState // copy
cs.mtx.RUnlock()
@ -231,42 +231,42 @@ func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
}
// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
func (cs *State) GetRoundStateJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState)
}
// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
}
// GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
func (cs *State) GetValidators() (int64, []*types.Validator) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
}
// SetPrivValidator sets the private validator account for signing votes.
func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
func (cs *State) SetPrivValidator(priv types.PrivValidator) {
cs.mtx.Lock()
cs.privValidator = priv
cs.mtx.Unlock()
}
// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
cs.mtx.Lock()
cs.timeoutTicker = timeoutTicker
cs.mtx.Unlock()
}
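As the comment suggests, tests can swap in their own timer; a one-line sketch, where newMockTicker is a hypothetical test double implementing TimeoutTicker:
cs.SetTimeoutTicker(newMockTicker()) // timeouts now fire on demand instead of on the wall clock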
// LoadCommit loads the commit for a given height.
func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
func (cs *State) LoadCommit(height int64) *types.Commit {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
if height == cs.blockStore.Height() {
@ -277,7 +277,7 @@ func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
// OnStart implements cmn.Service.
// It loads the latest state via the WAL, and starts the timeout and receive routines.
func (cs *ConsensusState) OnStart() error {
func (cs *State) OnStart() error {
if err := cs.evsw.Start(); err != nil {
return err
}
@ -288,7 +288,7 @@ func (cs *ConsensusState) OnStart() error {
walFile := cs.config.WalFile()
wal, err := cs.OpenWAL(walFile)
if err != nil {
cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
cs.Logger.Error("Error loading State wal", "err", err.Error())
return err
}
cs.wal = wal
@ -324,7 +324,7 @@ go run scripts/json2wal/main.go wal.json $WALFILE # rebuild the file without cor
return err
}
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
cs.Logger.Error("Error on catchup replay. Proceeding to start State anyway", "err", err.Error())
// NOTE: if we ever do return an error here,
// make sure to stop the timeoutTicker
}
@ -342,7 +342,7 @@ go run scripts/json2wal/main.go wal.json $WALFILE # rebuild the file without cor
// timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
func (cs *ConsensusState) startRoutines(maxSteps int) {
func (cs *State) startRoutines(maxSteps int) {
err := cs.timeoutTicker.Start()
if err != nil {
cs.Logger.Error("Error starting timeout ticker", "err", err)
@ -352,7 +352,7 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
}
// OnStop implements cmn.Service.
func (cs *ConsensusState) OnStop() {
func (cs *State) OnStop() {
cs.evsw.Stop()
cs.timeoutTicker.Stop()
// WAL is stopped in receiveRoutine.
@ -361,12 +361,12 @@ func (cs *ConsensusState) OnStop() {
// Wait waits for the main routine to return.
// NOTE: be sure to Stop() the event switch and drain
// any event channels or this may deadlock
func (cs *ConsensusState) Wait() {
func (cs *State) Wait() {
<-cs.done
}
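A caller-side shutdown sketch implied by that note (the ordering matches how the replay tests stop consensus):
cs.Stop() // OnStop halts evsw and the timeoutTicker; the WAL stops inside receiveRoutine
cs.Wait() // blocks on cs.done, which receiveRoutine signals when it exits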
// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
func (cs *State) OpenWAL(walFile string) (WAL, error) {
wal, err := NewWAL(walFile)
if err != nil {
cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
@ -387,7 +387,7 @@ func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
// TODO: should these return anything or let callers just use events?
// AddVote inputs a vote.
func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
@ -399,7 +399,7 @@ func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
// SetProposal inputs a proposal.
func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
@ -412,7 +412,7 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) e
}
// AddProposalBlockPart inputs a part of the proposal block.
func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
func (cs *State) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
@ -425,11 +425,12 @@ func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *ty
}
// SetProposalAndBlock inputs the proposal and all block parts.
func (cs *ConsensusState) SetProposalAndBlock(
func (cs *State) SetProposalAndBlock(
proposal *types.Proposal,
block *types.Block,
parts *types.PartSet,
peerID p2p.ID) error {
peerID p2p.ID,
) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@ -445,30 +446,30 @@ func (cs *ConsensusState) SetProposalAndBlock(
//------------------------------------------------------------
// internal functions for managing the state
func (cs *ConsensusState) updateHeight(height int64) {
func (cs *State) updateHeight(height int64) {
cs.metrics.Height.Set(float64(height))
cs.Height = height
}
func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) {
func (cs *State) updateRoundStep(round int, step cstypes.RoundStepType) {
cs.Round = round
cs.Step = step
}
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
func (cs *State) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(tmtime.Now())
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}
// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) {
func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) {
cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}
// send a msg into the receiveRoutine regarding our own proposal, block part, or vote
func (cs *ConsensusState) sendInternalMessage(mi msgInfo) {
func (cs *State) sendInternalMessage(mi msgInfo) {
select {
case cs.internalMsgQueue <- mi:
default:
@ -483,7 +484,7 @@ func (cs *ConsensusState) sendInternalMessage(mi msgInfo) {
// Reconstruct LastCommit from SeenCommit, which we saved along with the block
// (this happens even before saving the state)
func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
func (cs *State) reconstructLastCommit(state sm.State) {
if state.LastBlockHeight == 0 {
return
}
@ -495,9 +496,9 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
cs.LastCommit = lastPrecommits
}
// Updates ConsensusState and increments height to match that of state.
// Updates State and increments height to match that of state.
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state sm.State) {
func (cs *State) updateToState(state sm.State) {
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
panic(fmt.Sprintf("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight))
@ -574,11 +575,11 @@ func (cs *ConsensusState) updateToState(state sm.State) {
cs.newStep()
}
func (cs *ConsensusState) newStep() {
func (cs *State) newStep() {
rs := cs.RoundStateEvent()
cs.wal.Write(rs)
cs.nSteps++
// newStep is called by updateToState in NewConsensusState before the eventBus is set!
// newStep is called by updateToState in NewState before the eventBus is set!
if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs)
cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
@ -592,9 +593,9 @@ func (cs *ConsensusState) newStep() {
// its argument (n) is the number of messages to process before exiting - use 0 to run forever
// It keeps the RoundState and is the only thing that updates it.
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
// ConsensusState must be locked before any internal state is updated.
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
onExit := func(cs *ConsensusState) {
// State must be locked before any internal state is updated.
func (cs *State) receiveRoutine(maxSteps int) {
onExit := func(cs *State) {
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but it's ok because
// priv_val tracks LastSig
@ -669,7 +670,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
}
// state transitions on complete-proposal, 2/3-any, 2/3-one
func (cs *ConsensusState) handleMsg(mi msgInfo) {
func (cs *State) handleMsg(mi msgInfo) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
@ -736,7 +737,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
}
}
func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// timeouts must be for current height, round, step
@ -772,7 +773,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
}
func (cs *ConsensusState) handleTxsAvailable() {
func (cs *State) handleTxsAvailable() {
cs.mtx.Lock()
defer cs.mtx.Unlock()
@ -806,7 +807,7 @@ func (cs *ConsensusState) handleTxsAvailable() {
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
// NOTE: cs.StartTime was already set for height.
func (cs *ConsensusState) enterNewRound(height int64, round int) {
func (cs *State) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
@ -870,7 +871,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
// and for any height where the last block (height-1) caused the app hash to change
func (cs *ConsensusState) needProofBlock(height int64) bool {
func (cs *State) needProofBlock(height int64) bool {
if height == 1 {
return true
}
@ -883,7 +884,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *ConsensusState) enterPropose(height int64, round int) {
func (cs *State) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
@ -944,11 +945,11 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
}
}
func (cs *ConsensusState) isProposer(address []byte) bool {
func (cs *State) isProposer(address []byte) bool {
return bytes.Equal(cs.Validators.GetProposer().Address, address)
}
func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
func (cs *State) defaultDecideProposal(height int64, round int) {
var block *types.Block
var blockParts *types.PartSet
@ -988,7 +989,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
// Returns true if the proposal block is complete &&
// (if POLRound was proposed, we have +2/3 prevotes from there).
func (cs *ConsensusState) isProposalComplete() bool {
func (cs *State) isProposalComplete() bool {
if cs.Proposal == nil || cs.ProposalBlock == nil {
return false
}
@ -1007,7 +1008,7 @@ func (cs *ConsensusState) isProposalComplete() bool {
// is returned for convenience so we can log the proposal block.
// Returns nil block upon error.
// NOTE: keep it side-effect free for clarity.
func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
var commit *types.Commit
switch {
case cs.Height == 1:
@ -1031,7 +1032,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Enter: proposal block and POL is ready.
// Prevote for LockedBlock if we're locked, or ProposalBlock if valid.
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int64, round int) {
func (cs *State) enterPrevote(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
cs.Logger.Debug(fmt.Sprintf(
"enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v",
@ -1058,7 +1059,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) {
// (so we have more time to try and collect +2/3 prevotes for a single block)
}
func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
func (cs *State) defaultDoPrevote(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
// If a block is locked, prevote that.
@ -1092,7 +1093,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
}
// Enter: any +2/3 prevotes at next round.
func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
func (cs *State) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
@ -1126,7 +1127,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// otherwise, precommit nil.
func (cs *ConsensusState) enterPrecommit(height int64, round int) {
func (cs *State) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
@ -1228,7 +1229,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
}
// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
func (cs *State) enterPrecommitWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) {
@ -1256,7 +1257,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
}
// Enter: +2/3 precommits for block
func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
func (cs *State) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
@ -1320,7 +1321,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
}
// If we have the block AND +2/3 commits for it, finalize.
func (cs *ConsensusState) tryFinalizeCommit(height int64) {
func (cs *State) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)
if cs.Height != height {
@ -1349,7 +1350,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
}
// Increment height and goto cstypes.RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int64) {
func (cs *State) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
cs.Logger.Debug(fmt.Sprintf(
"finalizeCommit(%v): Invalid args. Current step: %v/%v/%v",
@ -1409,12 +1410,13 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
// complains about replaying for heights where an #ENDHEIGHT entry already
// exists.
//
// Either way, the ConsensusState should not be resumed until we
// Either way, the State should not be resumed until we
// successfully call ApplyBlock (ie. later here, or in Handshake after
// restart).
endMsg := EndHeightMessage{height}
if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync
panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node", endMsg, err))
panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node",
endMsg, err))
}
fail.Fail() // XXX
@ -1458,7 +1460,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
// * cs.StartTime is set to when we will start round0.
}
func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
func (cs *State) recordMetrics(height int64, block *types.Block) {
cs.metrics.Validators.Set(float64(cs.Validators.Size()))
cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
missingValidators := 0
@ -1499,7 +1501,7 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
//-----------------------------------------------------------------------------
func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// Already have one
// TODO: possibly catch double proposals
if cs.Proposal != nil {
@ -1536,7 +1538,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before the propose timeout) or tryFinalizeCommit,
// once we have the full block.
func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part
// Blocks might be reused, so round mismatch is OK
@ -1606,7 +1608,7 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
}
// Attempt to add the vote. If it's a duplicate signature, dupeout the validator
func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@ -1644,7 +1646,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
//-----------------------------------------------------------------------------
func (cs *ConsensusState) addVote(
func (cs *State) addVote(
vote *types.Vote,
peerID p2p.ID) (added bool, err error) {
cs.Logger.Debug(
@ -1656,7 +1658,8 @@ func (cs *ConsensusState) addVote(
"valIndex",
vote.ValidatorIndex,
"csHeight",
cs.Height)
cs.Height,
)
// A precommit for the previous height?
// These come in while we wait for timeoutCommit
@ -1742,7 +1745,7 @@ func (cs *ConsensusState) addVote(
} else {
cs.Logger.Info(
"Valid block we don't know about. Set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(), "blockId", blockID.Hash)
"proposal", cs.ProposalBlock.Hash(), "blockID", blockID.Hash)
// We're getting the wrong block.
cs.ProposalBlock = nil
}
@ -1802,10 +1805,11 @@ func (cs *ConsensusState) addVote(
return added, err
}
func (cs *ConsensusState) signVote(
type_ types.SignedMsgType,
func (cs *State) signVote(
msgType types.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {
header types.PartSetHeader,
) (*types.Vote, error) {
// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
// and the privValidator will refuse to sign anything.
cs.wal.FlushAndSync()
@ -1819,14 +1823,14 @@ func (cs *ConsensusState) signVote(
Height: cs.Height,
Round: cs.Round,
Timestamp: cs.voteTime(),
Type: type_,
Type: msgType,
BlockID: types.BlockID{Hash: hash, PartsHeader: header},
}
err := cs.privValidator.SignVote(cs.state.ChainID, vote)
return vote, err
}
func (cs *ConsensusState) voteTime() time.Time {
func (cs *State) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
@ -1846,12 +1850,12 @@ func (cs *ConsensusState) voteTime() time.Time {
}
// sign the vote and publish on internalMsgQueue
func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
// if we don't have a key or we're not in the validator set, do nothing
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) {
return nil
}
vote, err := cs.signVote(type_, hash, header)
vote, err := cs.signVote(msgType, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)


+30 -30 consensus/state_test.go

@ -52,7 +52,7 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh
// ProposeSuite
func TestStateProposerSelection0(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
height, round := cs1.Height, cs1.Round
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@ -88,7 +88,7 @@ func TestStateProposerSelection0(t *testing.T) {
// Now let's do it all again, but starting from round 2 instead of 0
func TestStateProposerSelection2(t *testing.T) {
cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
cs1, vss := randState(4) // test needs more work for more than 3 validators
height := cs1.Height
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@ -123,7 +123,7 @@ func TestStateProposerSelection2(t *testing.T) {
// a non-validator should timeout into the prevote round
func TestStateEnterProposeNoPrivValidator(t *testing.T) {
cs, _ := randConsensusState(1)
cs, _ := randState(1)
cs.SetPrivValidator(nil)
height, round := cs.Height, cs.Round
@ -142,7 +142,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) {
// a validator should not timeout of the prevote round (TODO: unless the block is really big!)
func TestStateEnterProposeYesPrivValidator(t *testing.T) {
cs, _ := randConsensusState(1)
cs, _ := randState(1)
height, round := cs.Height, cs.Round
// Listen for propose timeout event
@ -172,7 +172,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) {
}
func TestStateBadProposal(t *testing.T) {
cs1, vss := randConsensusState(2)
cs1, vss := randState(2)
height, round := cs1.Height, cs1.Round
vs2 := vss[1]
@ -231,7 +231,7 @@ func TestStateBadProposal(t *testing.T) {
// propose, prevote, and precommit a block
func TestStateFullRound1(t *testing.T) {
cs, vss := randConsensusState(1)
cs, vss := randState(1)
height, round := cs.Height, cs.Round
// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
@ -267,7 +267,7 @@ func TestStateFullRound1(t *testing.T) {
// nil is proposed, so prevote and precommit nil
func TestStateFullRoundNil(t *testing.T) {
cs, vss := randConsensusState(1)
cs, vss := randState(1)
height, round := cs.Height, cs.Round
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
@ -285,7 +285,7 @@ func TestStateFullRoundNil(t *testing.T) {
// run through propose, prevote, precommit, commit with two validators
// where the first validator has to wait for votes from the second
func TestStateFullRound2(t *testing.T) {
cs1, vss := randConsensusState(2)
cs1, vss := randState(2)
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
@ -325,7 +325,7 @@ func TestStateFullRound2(t *testing.T) {
// two validators, 4 rounds.
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestStateLockNoPOL(t *testing.T) {
cs1, vss := randConsensusState(2)
cs1, vss := randState(2)
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
@ -462,7 +462,7 @@ func TestStateLockNoPOL(t *testing.T) {
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
cs2, _ := randConsensusState(2) // needed so generated block is different than locked block
cs2, _ := randState(2) // needed so generated block is different than locked block
// before we time out into new round, set next proposal block
prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
if prop == nil || propBlock == nil {
@ -508,7 +508,7 @@ func TestStateLockNoPOL(t *testing.T) {
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestStateLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -598,7 +598,7 @@ func TestStateLockPOLRelock(t *testing.T) {
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestStateLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -690,7 +690,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
// then a polka at round 2 that we lock on
// then we see the polka from round 1 but shouldn't unlock
func TestStateLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -807,7 +807,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
// What we want:
// don't see P0, lock on P1 at R1, don't unlock using P0 at R2
func TestStateLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -898,7 +898,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
// What we want:
// P0 proposes B0 at R3.
func TestProposeValidBlock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -986,7 +986,7 @@ func TestProposeValidBlock(t *testing.T) {
// What we want:
// P0 misses locking B but sets the valid block to B after receiving a delayed prevote.
func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1046,7 +1046,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
// P0 misses locking B as the Proposal Block is missing, but sets the valid block to B after
// receiving the delayed Block Proposal.
func TestSetValidBlockOnDelayedProposal(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1100,7 +1100,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
// What we want:
// P0 waits for timeoutPrecommit before starting next round
func TestWaitingTimeoutOnNilPolka(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1121,7 +1121,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
// What we want:
// P0 waits for timeoutPropose in the next round before entering prevote
func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1155,7 +1155,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
// What we want:
// P0 jumps to a higher round, precommits, and starts precommit wait
func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1189,7 +1189,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
// What we want:
// P0 waits for timeoutPropose to expire before sending prevote.
func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, 1
@ -1214,7 +1214,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
// What we want:
// P0 emits a NewValidBlock event upon receiving 2/3+ Precommits for B before it has received block B
func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, 1
@ -1248,7 +1248,7 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
// P0 receives 2/3+ Precommit for B for round 0, while being in round 1. It emits NewValidBlock event.
// After receiving block, it executes block and moves to the next height.
func TestCommitFromPreviousRound(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, 1
@ -1299,7 +1299,7 @@ func (n *fakeTxNotifier) Notify() {
func TestStartNextHeightCorrectly(t *testing.T) {
config.Consensus.SkipTimeoutCommit = false
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -1354,7 +1354,7 @@ func TestStartNextHeightCorrectly(t *testing.T) {
func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
config.Consensus.SkipTimeoutCommit = false
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
@ -1413,7 +1413,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
/*
func TestStateSlashingPrevotes(t *testing.T) {
cs1, vss := randConsensusState(2)
cs1, vss := randState(2)
vs2 := vss[1]
@ -1448,7 +1448,7 @@ func TestStateSlashingPrevotes(t *testing.T) {
}
func TestStateSlashingPrecommits(t *testing.T) {
cs1, vss := randConsensusState(2)
cs1, vss := randState(2)
vs2 := vss[1]
@ -1493,7 +1493,7 @@ func TestStateSlashingPrecommits(t *testing.T) {
// 4 vals.
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestStateHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs1, vss := randState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
partSize := types.BlockPartSizeBytes
@ -1559,7 +1559,7 @@ func TestStateHalt1(t *testing.T) {
func TestStateOutputsBlockPartsStats(t *testing.T) {
// create dummy peer
cs, _ := randConsensusState(1)
cs, _ := randState(1)
peer := p2pmock.NewPeer(nil)
// 1) new block part
@ -1601,7 +1601,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
}
func TestStateOutputVoteStats(t *testing.T) {
cs, vss := randConsensusState(2)
cs, vss := randState(2)
// create dummy peer
peer := p2pmock.NewPeer(nil)


+2 -1 consensus/wal.go

@ -206,7 +206,8 @@ func (wal *baseWAL) WriteSync(msg WALMessage) error {
}
if err := wal.FlushAndSync(); err != nil {
wal.Logger.Error("WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted",
wal.Logger.Error(`WriteSync failed to flush consensus wal.
WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`,
"err", err)
return err
}


+1 -1 consensus/wal_generator.go

@ -74,7 +74,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
mempool := mock.Mempool{}
evpool := sm.MockEvidencePool{}
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetLogger(logger)
consensusState.SetEventBus(eventBus)
if privValidator != nil {


+4 -4 crypto/ed25519/ed25519.go

@ -85,9 +85,9 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey {
func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool {
if otherEd, ok := other.(PrivKeyEd25519); ok {
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
} else {
return false
}
return false
}
// GenPrivKey generates a new ed25519 private key.
@ -164,7 +164,7 @@ func (pubKey PubKeyEd25519) String() string {
func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool {
if otherEd, ok := other.(PubKeyEd25519); ok {
return bytes.Equal(pubKey[:], otherEd[:])
} else {
return false
}
return false
}
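The rule at work here is golint's indent-error-flow: when the if branch ends in a return, drop the else and outdent the fallback. A self-contained toy example of the same shape (names are illustrative, not from this codebase):
package main

import "fmt"

// equalsInt reports whether v holds an int equal to n, using the
// no-else-after-return shape golint asks for.
func equalsInt(v interface{}, n int) bool {
	if i, ok := v.(int); ok {
		return i == n
	}
	return false
}

func main() {
	fmt.Println(equalsInt(42, 42), equalsInt("x", 42)) // true false
}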

+2 -2 crypto/merkle/simple_tree.go

@ -82,9 +82,9 @@ func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
rp += 2
} else {
items[wp] = items[rp]
rp += 1
rp++
}
wp += 1
wp++
}
size = wp
}


+1 -1 evidence/codec.go

@ -9,7 +9,7 @@ import (
var cdc = amino.NewCodec()
func init() {
RegisterEvidenceMessages(cdc)
RegisterMessages(cdc)
cryptoamino.RegisterAmino(cdc)
types.RegisterEvidences(cdc)
}


+29 -29 evidence/pool.go

@ -12,13 +12,13 @@ import (
"github.com/tendermint/tendermint/types"
)
// EvidencePool maintains a pool of valid evidence
// in an EvidenceStore.
type EvidencePool struct {
// Pool maintains a pool of valid evidence
// in a Store.
type Pool struct {
logger log.Logger
evidenceStore *EvidenceStore
evidenceList *clist.CList // concurrent linked-list of evidence
store *Store
evidenceList *clist.CList // concurrent linked-list of evidence
// needed to load validators to verify evidence
stateDB dbm.DB
@ -28,51 +28,51 @@ type EvidencePool struct {
state sm.State
}
func NewEvidencePool(stateDB, evidenceDB dbm.DB) *EvidencePool {
evidenceStore := NewEvidenceStore(evidenceDB)
evpool := &EvidencePool{
stateDB: stateDB,
state: sm.LoadState(stateDB),
logger: log.NewNopLogger(),
evidenceStore: evidenceStore,
evidenceList: clist.New(),
func NewPool(stateDB, evidenceDB dbm.DB) *Pool {
store := NewStore(evidenceDB)
evpool := &Pool{
stateDB: stateDB,
state: sm.LoadState(stateDB),
logger: log.NewNopLogger(),
store: store,
evidenceList: clist.New(),
}
return evpool
}
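A quick construction sketch mirroring the pool tests below; initializeValidatorState is the test helper those tests use, and valAddr/height are assumed values:
stateDB := initializeValidatorState(valAddr, height) // test helper from pool_test.go
pool := NewPool(stateDB, dbm.NewMemDB())
ev := types.NewMockGoodEvidence(height, 0, valAddr)
if err := pool.AddEvidence(ev); err != nil {
	panic(err) // verification against the stored validator set failed
}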
func (evpool *EvidencePool) EvidenceFront() *clist.CElement {
func (evpool *Pool) EvidenceFront() *clist.CElement {
return evpool.evidenceList.Front()
}
func (evpool *EvidencePool) EvidenceWaitChan() <-chan struct{} {
func (evpool *Pool) EvidenceWaitChan() <-chan struct{} {
return evpool.evidenceList.WaitChan()
}
// SetLogger sets the Logger.
func (evpool *EvidencePool) SetLogger(l log.Logger) {
func (evpool *Pool) SetLogger(l log.Logger) {
evpool.logger = l
}
// PriorityEvidence returns the priority evidence.
func (evpool *EvidencePool) PriorityEvidence() []types.Evidence {
return evpool.evidenceStore.PriorityEvidence()
func (evpool *Pool) PriorityEvidence() []types.Evidence {
return evpool.store.PriorityEvidence()
}
// PendingEvidence returns up to maxNum uncommitted evidence.
// If maxNum is -1, all evidence is returned.
func (evpool *EvidencePool) PendingEvidence(maxNum int64) []types.Evidence {
return evpool.evidenceStore.PendingEvidence(maxNum)
func (evpool *Pool) PendingEvidence(maxNum int64) []types.Evidence {
return evpool.store.PendingEvidence(maxNum)
}
// State returns the current state of the evpool.
func (evpool *EvidencePool) State() sm.State {
func (evpool *Pool) State() sm.State {
evpool.mtx.Lock()
defer evpool.mtx.Unlock()
return evpool.state
}
// Update loads the latest
func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
func (evpool *Pool) Update(block *types.Block, state sm.State) {
// sanity check
if state.LastBlockHeight != block.Height {
@ -94,7 +94,7 @@ func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
}
// AddEvidence checks the evidence is valid and adds it to the pool.
func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
func (evpool *Pool) AddEvidence(evidence types.Evidence) (err error) {
// TODO: check if we already have evidence for this
// validator at this height so we don't get spammed
@ -109,7 +109,7 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
_, val := valset.GetByAddress(evidence.Address())
priority := val.VotingPower
added := evpool.evidenceStore.AddNewEvidence(evidence, priority)
added := evpool.store.AddNewEvidence(evidence, priority)
if !added {
// evidence already known, just ignore
return
@ -124,11 +124,11 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
}
// MarkEvidenceAsCommitted marks all the evidence as committed and removes it from the queue.
func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []types.Evidence) {
func (evpool *Pool) MarkEvidenceAsCommitted(height int64, evidence []types.Evidence) {
// make a map of committed evidence to remove from the clist
blockEvidenceMap := make(map[string]struct{})
for _, ev := range evidence {
evpool.evidenceStore.MarkEvidenceAsCommitted(ev)
evpool.store.MarkEvidenceAsCommitted(ev)
blockEvidenceMap[evMapKey(ev)] = struct{}{}
}
@ -139,12 +139,12 @@ func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []typ
}
// IsCommitted returns true if we have already seen this exact evidence and it is already marked as committed.
func (evpool *EvidencePool) IsCommitted(evidence types.Evidence) bool {
ei := evpool.evidenceStore.getEvidenceInfo(evidence)
func (evpool *Pool) IsCommitted(evidence types.Evidence) bool {
ei := evpool.store.getInfo(evidence)
return ei.Evidence != nil && ei.Committed
}
func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) {
func (evpool *Pool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) {
for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
ev := e.Value.(types.Evidence)


+2 -2 evidence/pool_test.go

@ -57,7 +57,7 @@ func TestEvidencePool(t *testing.T) {
height := int64(5)
stateDB := initializeValidatorState(valAddr, height)
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDB, evidenceDB)
pool := NewPool(stateDB, evidenceDB)
goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr)
badEvidence := types.MockBadEvidence{MockGoodEvidence: goodEvidence}
@ -91,7 +91,7 @@ func TestEvidencePoolIsCommitted(t *testing.T) {
height := int64(42)
stateDB := initializeValidatorState(valAddr, height)
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDB, evidenceDB)
pool := NewPool(stateDB, evidenceDB)
// evidence not seen yet:
evidence := types.NewMockGoodEvidence(height, 0, valAddr)


+30 -30 evidence/reactor.go

@ -22,31 +22,31 @@ const (
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
)
// EvidenceReactor handles evpool evidence broadcasting amongst peers.
type EvidenceReactor struct {
// Reactor handles evpool evidence broadcasting amongst peers.
type Reactor struct {
p2p.BaseReactor
evpool *EvidencePool
evpool *Pool
eventBus *types.EventBus
}
// NewEvidenceReactor returns a new EvidenceReactor with the given config and evpool.
func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor {
evR := &EvidenceReactor{
// NewReactor returns a new Reactor with the given config and evpool.
func NewReactor(evpool *Pool) *Reactor {
evR := &Reactor{
evpool: evpool,
}
evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR)
evR.BaseReactor = *p2p.NewBaseReactor("Reactor", evR)
return evR
}
// SetLogger sets the Logger on the reactor and the underlying evidence pool.
func (evR *EvidenceReactor) SetLogger(l log.Logger) {
func (evR *Reactor) SetLogger(l log.Logger) {
evR.Logger = l
evR.evpool.SetLogger(l)
}
// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: EvidenceChannel,
@ -56,13 +56,13 @@ func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// AddPeer implements Reactor.
func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
func (evR *Reactor) AddPeer(peer p2p.Peer) {
go evR.broadcastEvidenceRoutine(peer)
}
// Receive implements Reactor.
// It adds any received evidence to the evpool.
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
if err != nil {
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@ -79,7 +79,7 @@ func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
switch msg := msg.(type) {
case *EvidenceListMessage:
case *ListMessage:
for _, ev := range msg.Evidence {
err := evR.evpool.AddEvidence(ev)
if err != nil {
@ -94,7 +94,7 @@ func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
}
// SetEventBus sets the event bus on the reactor.
func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
func (evR *Reactor) SetEventBus(b *types.EventBus) {
evR.eventBus = b
}
@ -104,7 +104,7 @@ func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
// sending available evidence to the peer.
// - If we're waiting for new evidence and the list is not empty,
// start iterating from the beginning again.
func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) {
var next *clist.CElement
for {
// This happens because the CElement we were looking at got garbage
@ -154,10 +154,10 @@ func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
// Returns the message to send the peer, or nil if the evidence is invalid for the peer.
// If the message is nil, retry reports whether the caller should sleep and try again.
func (evR EvidenceReactor) checkSendEvidenceMessage(
func (evR Reactor) checkSendEvidenceMessage(
peer p2p.Peer,
ev types.Evidence,
) (msg EvidenceMessage, retry bool) {
) (msg Message, retry bool) {
// make sure the peer is up to date
evHeight := ev.Height()
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
@ -193,7 +193,7 @@ func (evR EvidenceReactor) checkSendEvidenceMessage(
}
// send evidence
msg = &EvidenceListMessage{[]types.Evidence{ev}}
msg = &ListMessage{[]types.Evidence{ev}}
return msg, false
}
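For context, the (msg, retry) pair feeds the send loop in broadcastEvidenceRoutine above; a sketch of that call site, assumed to sit inside the routine's for loop:
msg, retry := evR.checkSendEvidenceMessage(peer, ev)
if msg != nil {
	retry = !peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg)) // retry if the peer's send queue is full
}
if retry {
	time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
	continue // try the same element again on the next pass
}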
@ -205,18 +205,18 @@ type PeerState interface {
//-----------------------------------------------------------------------------
// Messages
// EvidenceMessage is a message sent or received by the EvidenceReactor.
type EvidenceMessage interface {
// Message is a message sent or received by the Reactor.
type Message interface {
ValidateBasic() error
}
func RegisterEvidenceMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
cdc.RegisterConcrete(&EvidenceListMessage{},
"tendermint/evidence/EvidenceListMessage", nil)
func RegisterMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*Message)(nil), nil)
cdc.RegisterConcrete(&ListMessage{},
"tendermint/evidence/ListMessage", nil)
}
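A round-trip sketch of what these registrations enable; cdc is this package's codec from codec.go, and decodeMsg is defined just below:
bz := cdc.MustMarshalBinaryBare(&ListMessage{Evidence: []types.Evidence{ev}})
msg, err := decodeMsg(bz) // msg is the Message interface; concretely a *ListMessage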
func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
@ -226,13 +226,13 @@ func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
//-------------------------------------
// EvidenceListMessage contains a list of evidence.
type EvidenceListMessage struct {
// ListMessage contains a list of evidence.
type ListMessage struct {
Evidence []types.Evidence
}
// ValidateBasic performs basic validation.
func (m *EvidenceListMessage) ValidateBasic() error {
func (m *ListMessage) ValidateBasic() error {
for i, ev := range m.Evidence {
if err := ev.ValidateBasic(); err != nil {
return fmt.Errorf("invalid evidence (#%d): %v", i, err)
@ -241,7 +241,7 @@ func (m *EvidenceListMessage) ValidateBasic() error {
return nil
}
// String returns a string representation of the EvidenceListMessage.
func (m *EvidenceListMessage) String() string {
return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence)
// String returns a string representation of the ListMessage.
func (m *ListMessage) String() string {
return fmt.Sprintf("[ListMessage %v]", m.Evidence)
}

+14 -14 evidence/reactor_test.go

@ -31,15 +31,15 @@ func evidenceLogger() log.Logger {
}
// connect N evidence reactors through N switches
func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*EvidenceReactor {
func makeAndConnectReactors(config *cfg.Config, stateDBs []dbm.DB) []*Reactor {
N := len(stateDBs)
reactors := make([]*EvidenceReactor, N)
reactors := make([]*Reactor, N)
logger := evidenceLogger()
for i := 0; i < N; i++ {
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDBs[i], evidenceDB)
reactors[i] = NewEvidenceReactor(pool)
pool := NewPool(stateDBs[i], evidenceDB)
reactors[i] = NewReactor(pool)
reactors[i].SetLogger(logger.With("validator", i))
}
@ -52,7 +52,7 @@ func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*Ev
}
// wait for all evidence on all reactors
func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceReactor) {
func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*Reactor) {
// wait for the evidence in all evpools
wg := new(sync.WaitGroup)
for i := 0; i < len(reactors); i++ {
@ -80,7 +80,7 @@ func _waitForEvidence(
wg *sync.WaitGroup,
evs types.EvidenceList,
reactorIdx int,
reactors []*EvidenceReactor,
reactors []*Reactor,
) {
evpool := reactors[reactorIdx].evpool
for len(evpool.PendingEvidence(-1)) != len(evs) {
@ -103,7 +103,7 @@ func _waitForEvidence(
wg.Done()
}
func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList {
func sendEvidence(t *testing.T, evpool *Pool, valAddr []byte, n int) types.EvidenceList {
evList := make([]types.Evidence, n)
for i := 0; i < n; i++ {
ev := types.NewMockGoodEvidence(int64(i+1), 0, valAddr)
@ -133,7 +133,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
}
// make reactors from statedb
reactors := makeAndConnectEvidenceReactors(config, stateDBs)
reactors := makeAndConnectReactors(config, stateDBs)
// set the peer height on each reactor
for _, r := range reactors {
@ -169,7 +169,7 @@ func TestReactorSelectiveBroadcast(t *testing.T) {
stateDB2 := initializeValidatorState(valAddr, height2)
// make reactors from statedb
reactors := makeAndConnectEvidenceReactors(config, []dbm.DB{stateDB1, stateDB2})
reactors := makeAndConnectReactors(config, []dbm.DB{stateDB1, stateDB2})
// set the peer height on each reactor
for _, r := range reactors {
@ -194,15 +194,15 @@ func TestReactorSelectiveBroadcast(t *testing.T) {
peers := reactors[1].Switch.Peers().List()
assert.Equal(t, 1, len(peers))
}
func TestEvidenceListMessageValidationBasic(t *testing.T) {
func TestListMessageValidationBasic(t *testing.T) {
testCases := []struct {
testName string
malleateEvListMsg func(*EvidenceListMessage)
malleateEvListMsg func(*ListMessage)
expectErr bool
}{
{"Good EvidenceListMessage", func(evList *EvidenceListMessage) {}, false},
{"Invalid EvidenceListMessage", func(evList *EvidenceListMessage) {
{"Good ListMessage", func(evList *ListMessage) {}, false},
{"Invalid ListMessage", func(evList *ListMessage) {
evList.Evidence = append(evList.Evidence,
&types.DuplicateVoteEvidence{PubKey: secp256k1.GenPrivKey().PubKey()})
}, true},
@ -210,7 +210,7 @@ func TestEvidenceListMessageValidationBasic(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
evListMsg := &EvidenceListMessage{}
evListMsg := &ListMessage{}
n := 3
valAddr := []byte("myval")
evListMsg.Evidence = make([]types.Evidence, n)


+29 -29 evidence/store.go

@ -20,12 +20,12 @@ Impl:
Schema for indexing evidence (note you need both height and hash to find a piece of evidence):
"evidence-lookup"/<evidence-height>/<evidence-hash> -> EvidenceInfo
"evidence-outqueue"/<priority>/<evidence-height>/<evidence-hash> -> EvidenceInfo
"evidence-pending"/<evidence-height>/<evidence-hash> -> EvidenceInfo
"evidence-lookup"/<evidence-height>/<evidence-hash> -> Info
"evidence-outqueue"/<priority>/<evidence-height>/<evidence-hash> -> Info
"evidence-pending"/<evidence-height>/<evidence-hash> -> Info
*/
type EvidenceInfo struct {
type Info struct {
Committed bool
Priority int64
Evidence types.Evidence
@ -58,25 +58,25 @@ func keyPending(evidence types.Evidence) []byte {
return _key("%s/%s/%X", baseKeyPending, bE(evidence.Height()), evidence.Hash())
}
func _key(fmt_ string, o ...interface{}) []byte {
return []byte(fmt.Sprintf(fmt_, o...))
func _key(format string, o ...interface{}) []byte {
return []byte(fmt.Sprintf(format, o...))
}
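Concretely, per the schema comment above, a pending key for evidence at height 5 lays out as follows (illustrative; bE renders the height so keys sort by height first, then hash):
// "evidence-pending/<bE(5)>/<hex of ev.Hash()>"
k := keyPending(ev)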
// EvidenceStore is a store of all the evidence we've seen, including
// Store is a store of all the evidence we've seen, including
// evidence that has been committed, evidence that has been verified but not broadcast,
// and evidence that has been broadcast but not yet committed.
type EvidenceStore struct {
type Store struct {
db dbm.DB
}
func NewEvidenceStore(db dbm.DB) *EvidenceStore {
return &EvidenceStore{
func NewStore(db dbm.DB) *Store {
return &Store{
db: db,
}
}
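Usage sketch matching the store tests below (same MemDB and mock-evidence helpers):
store := NewStore(dbm.NewMemDB())
ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
if store.AddNewEvidence(ev, 10) { // false would mean we had already stored it
	ei := store.GetInfo(ev.Height(), ev.Hash())
	fmt.Println(ei.Committed) // false until MarkEvidenceAsCommitted
}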
// PriorityEvidence returns the evidence from the outqueue, sorted by highest priority.
func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) {
func (store *Store) PriorityEvidence() (evidence []types.Evidence) {
// reverse the order so highest priority is first
l := store.listEvidence(baseKeyOutqueue, -1)
for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
@ -88,14 +88,14 @@ func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) {
// PendingEvidence returns up to maxNum known, uncommitted evidence.
// If maxNum is -1, all evidence is returned.
func (store *EvidenceStore) PendingEvidence(maxNum int64) (evidence []types.Evidence) {
func (store *Store) PendingEvidence(maxNum int64) (evidence []types.Evidence) {
return store.listEvidence(baseKeyPending, maxNum)
}
// listEvidence lists up to maxNum pieces of evidence for the given prefix key.
// It is wrapped by PriorityEvidence and PendingEvidence for convenience.
// If maxNum is -1, there's no cap on the size of returned evidence.
func (store *EvidenceStore) listEvidence(prefixKey string, maxNum int64) (evidence []types.Evidence) {
func (store *Store) listEvidence(prefixKey string, maxNum int64) (evidence []types.Evidence) {
var count int64
iter := dbm.IteratePrefix(store.db, []byte(prefixKey))
defer iter.Close()
@ -107,7 +107,7 @@ func (store *EvidenceStore) listEvidence(prefixKey string, maxNum int64) (eviden
}
count++
var ei EvidenceInfo
var ei Info
err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
@ -117,16 +117,16 @@ func (store *EvidenceStore) listEvidence(prefixKey string, maxNum int64) (eviden
return evidence
}
// GetEvidenceInfo fetches the EvidenceInfo with the given height and hash.
// GetInfo fetches the Info with the given height and hash.
// If not found, ei.Evidence is nil.
func (store *EvidenceStore) GetEvidenceInfo(height int64, hash []byte) EvidenceInfo {
func (store *Store) GetInfo(height int64, hash []byte) Info {
key := keyLookupFromHeightAndHash(height, hash)
val := store.db.Get(key)
if len(val) == 0 {
return EvidenceInfo{}
return Info{}
}
var ei EvidenceInfo
var ei Info
err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
@ -136,14 +136,14 @@ func (store *EvidenceStore) GetEvidenceInfo(height int64, hash []byte) EvidenceI
// AddNewEvidence adds the given evidence to the database.
// It returns false if the evidence is already stored.
func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int64) bool {
func (store *Store) AddNewEvidence(evidence types.Evidence, priority int64) bool {
// check if we already have seen it
ei := store.getEvidenceInfo(evidence)
ei := store.getInfo(evidence)
if ei.Evidence != nil {
return false
}
ei = EvidenceInfo{
ei = Info{
Committed: false,
Priority: priority,
Evidence: evidence,
@ -164,8 +164,8 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int
}
// MarkEvidenceAsBroadcasted removes evidence from Outqueue.
func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) {
ei := store.getEvidenceInfo(evidence)
func (store *Store) MarkEvidenceAsBroadcasted(evidence types.Evidence) {
ei := store.getInfo(evidence)
if ei.Evidence == nil {
// nothing to do; we did not store the evidence yet (AddNewEvidence):
return
@ -176,15 +176,15 @@ func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) {
}
// MarkEvidenceAsCommitted removes evidence from pending and outqueue and sets the state to committed.
func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
func (store *Store) MarkEvidenceAsCommitted(evidence types.Evidence) {
// if its committed, its been broadcast
store.MarkEvidenceAsBroadcasted(evidence)
pendingKey := keyPending(evidence)
store.db.Delete(pendingKey)
// committed EvidenceInfo doesn't need priority
ei := EvidenceInfo{
// committed Info doesn't need priority
ei := Info{
Committed: true,
Evidence: evidence,
Priority: 0,
@ -197,7 +197,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
//---------------------------------------------------
// utils
// getEvidenceInfo is a convenience for calling GetEvidenceInfo when we have the full evidence.
func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInfo {
return store.GetEvidenceInfo(evidence.Height(), evidence.Hash())
// getInfo is a convenience for calling GetInfo when we have the full evidence.
func (store *Store) getInfo(evidence types.Evidence) Info {
return store.GetInfo(evidence.Height(), evidence.Hash())
}

+6 -6 evidence/store_test.go

@ -14,7 +14,7 @@ func TestStoreAddDuplicate(t *testing.T) {
assert := assert.New(t)
db := dbm.NewMemDB()
store := NewEvidenceStore(db)
store := NewStore(db)
priority := int64(10)
ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
@ -31,7 +31,7 @@ func TestStoreCommitDuplicate(t *testing.T) {
assert := assert.New(t)
db := dbm.NewMemDB()
store := NewEvidenceStore(db)
store := NewStore(db)
priority := int64(10)
ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
@ -46,7 +46,7 @@ func TestStoreMark(t *testing.T) {
assert := assert.New(t)
db := dbm.NewMemDB()
store := NewEvidenceStore(db)
store := NewStore(db)
// before we do anything, priority/pending are empty
priorityEv := store.PriorityEvidence()
@ -61,7 +61,7 @@ func TestStoreMark(t *testing.T) {
assert.True(added)
// get the evidence. verify. should be uncommitted
ei := store.GetEvidenceInfo(ev.Height(), ev.Hash())
ei := store.GetInfo(ev.Height(), ev.Hash())
assert.Equal(ev, ei.Evidence)
assert.Equal(priority, ei.Priority)
assert.False(ei.Committed)
@ -88,7 +88,7 @@ func TestStoreMark(t *testing.T) {
// evidence should show committed
newPriority := int64(0)
ei = store.GetEvidenceInfo(ev.Height(), ev.Hash())
ei = store.GetInfo(ev.Height(), ev.Hash())
assert.Equal(ev, ei.Evidence)
assert.Equal(newPriority, ei.Priority)
assert.True(ei.Committed)
@ -98,7 +98,7 @@ func TestStorePriority(t *testing.T) {
assert := assert.New(t)
db := dbm.NewMemDB()
store := NewEvidenceStore(db)
store := NewStore(db)
// sorted by priority and then height
cases := []struct {


+ 1
- 1
libs/events/events_test.go

@ -94,7 +94,7 @@ func TestAddListenerForDifferentEvents(t *testing.T) {
go fireEvents(evsw, "event1", doneSending1, uint64(1))
go fireEvents(evsw, "event2", doneSending2, uint64(1))
go fireEvents(evsw, "event3", doneSending3, uint64(1))
var checkSum uint64 = 0
var checkSum uint64
checkSum += <-doneSending1
checkSum += <-doneSending2
checkSum += <-doneSending3
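
A minimal sketch of the lint rule applied above: golint flags `var x uint64 = 0` because a declared variable is zero-valued already, so the initializer is redundant.

package main

import "fmt"

func main() {
	var checkSum uint64 // golint-clean; equivalent to `var checkSum uint64 = 0`
	checkSum += 1 + 2 + 3
	fmt.Println(checkSum) // 6
}
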


+ 9
- 8
libs/fail/fail.go

@ -11,14 +11,15 @@ func envSet() int {
if callIndexToFailS == "" {
return -1
} else {
var err error
callIndexToFail, err := strconv.Atoi(callIndexToFailS)
if err != nil {
return -1
}
return callIndexToFail
}
var err error
callIndexToFail, err := strconv.Atoi(callIndexToFailS)
if err != nil {
return -1
}
return callIndexToFail
}
// Fail when FAIL_TEST_INDEX == callIndex
@ -34,7 +35,7 @@ func Fail() {
Exit()
}
callIndex += 1
callIndex++
}
func Exit() {
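
A standalone sketch of the two fixes in this hunk: drop the `else` when the `if` branch returns, and use `i++` instead of `i += 1`. Reading the index from os.Getenv("FAIL_TEST_INDEX") is an assumption based on the Fail comment above; the hunk itself does not show where callIndexToFailS comes from.

package main

import (
	"fmt"
	"os"
	"strconv"
)

func envSet() int {
	s := os.Getenv("FAIL_TEST_INDEX") // assumed source of callIndexToFailS
	if s == "" {
		return -1
	}
	n, err := strconv.Atoi(s) // no else needed: the if above returns
	if err != nil {
		return -1
	}
	return n
}

func main() {
	callIndex := 0
	callIndex++ // golint-clean form of callIndex += 1
	fmt.Println(envSet(), callIndex)
}
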


+ 6
- 6
libs/log/tracing_logger_test.go

@ -17,8 +17,8 @@ func TestTracingLogger(t *testing.T) {
logger := log.NewTMJSONLogger(&buf)
logger1 := log.NewTracingLogger(logger)
err1 := errors.New("courage is grace under pressure.")
err2 := errors.New("it does not matter how slowly you go, so long as you do not stop.")
err1 := errors.New("courage is grace under pressure")
err2 := errors.New("it does not matter how slowly you go, so long as you do not stop")
logger1.With("err1", err1).Info("foo", "err2", err2)
want := strings.Replace(
@ -38,14 +38,14 @@ func TestTracingLogger(t *testing.T) {
buf.Reset()
logger.With(
"err1", stderr.New("Opportunities don't happen. You create them."),
"err1", stderr.New("opportunities don't happen. You create them"),
).Info(
"foo", "err2", stderr.New("Once you choose hope, anything's possible."),
"foo", "err2", stderr.New("once you choose hope, anything's possible"),
)
want = `{"_msg":"foo",` +
`"err1":"Opportunities don't happen. You create them.",` +
`"err2":"Once you choose hope, anything's possible.",` +
`"err1":"opportunities don't happen. You create them",` +
`"err2":"once you choose hope, anything's possible",` +
`"level":"info"}`
have = strings.TrimSpace(buf.String())
if want != have {
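
A sketch of the error-string convention golint enforces in this test: error strings start lowercase and carry no trailing punctuation, because they are usually wrapped into a larger message mid-sentence.

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("once you choose hope, anything's possible")
	fmt.Printf("tracing failed: %v\n", err) // composes cleanly as one sentence
}
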


+ 1
- 1
lite/client/provider_test.go

@ -14,7 +14,7 @@ import (
)
func TestMain(m *testing.M) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app)
code := m.Run()
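
A hypothetical test-main sketch of the constructor rename repeated across these files: the package is already named kvstore, so golint prefers kvstore.NewApplication over the stuttering kvstore.NewKVStoreApplication.

package main

import "github.com/tendermint/tendermint/abci/example/kvstore"

func main() {
	app := kvstore.NewApplication() // was kvstore.NewKVStoreApplication()
	_ = app
}
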


+ 21
- 19
lite/dbprovider.go

@ -128,17 +128,18 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int
err := dbp.cdc.UnmarshalBinaryLengthPrefixed(shBz, &sh)
if err != nil {
return FullCommit{}, err
} else {
lfc, err := dbp.fillFullCommit(sh)
if err == nil {
dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height())
return lfc, nil
} else {
dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc)
dbp.logger.Error(fmt.Sprintf("%+v", err))
return lfc, err
}
}
lfc, err := dbp.fillFullCommit(sh)
if err == nil {
dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height())
return lfc, nil
}
dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc)
dbp.logger.Error(fmt.Sprintf("%+v", err))
return lfc, err
}
}
return FullCommit{}, lerr.ErrCommitNotFound()
@ -208,16 +209,17 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error {
_, height, ok := parseChainKeyPrefix(key)
if !ok {
return fmt.Errorf("unexpected key %v", key)
} else {
if height < lastHeight {
lastHeight = height
numSeen += 1
}
if numSeen > after {
dbp.db.Delete(key)
numDeleted += 1
}
}
if height < lastHeight {
lastHeight = height
numSeen++
}
if numSeen > after {
dbp.db.Delete(key)
numDeleted++
}
itr.Next()
}


+ 5
- 5
lite/dynamic_verifier_test.go

@ -127,7 +127,7 @@ func TestDynamicVerify(t *testing.T) {
}
func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit {
height += 1
height++
consHash := []byte("special-params")
appHash := []byte(fmt.Sprintf("h=%d", height))
resHash := []byte(fmt.Sprintf("res=%d", height))
@ -186,9 +186,9 @@ func TestInquirerVerifyHistorical(t *testing.T) {
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
commit, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
require.NotNil(err, "%+v", err)
assert.Equal(fc_, (FullCommit{}))
assert.Equal(commit, (FullCommit{}))
// With fcz[9] Verify will update last trusted height.
err = source.SaveFullCommit(fcz[9])
@ -197,9 +197,9 @@ func TestInquirerVerifyHistorical(t *testing.T) {
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
commit, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
require.Nil(err, "%+v", err)
assert.Equal(fc_.Height(), fcz[8].Height())
assert.Equal(commit.Height(), fcz[8].Height())
// Add access to all full commits via untrusted source.
for i := 0; i < count; i++ {


+ 1
- 1
lite/helpers.go

@ -9,7 +9,7 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
// privKeys is a helper type for testing.
// PrivKeys is a helper type for testing.
//
// It lets us simulate signing with many keys. The main use case is to create
// a set, and call GenSignedHeader to get properly signed header for testing.


+ 5
- 5
lite/multiprovider.go

@ -47,8 +47,8 @@ func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) {
// Returns the first error encountered.
func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) {
for _, p := range mc.providers {
var fc_ FullCommit
fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight)
var commit FullCommit
commit, err = p.LatestFullCommit(chainID, minHeight, maxHeight)
if lerr.IsErrCommitNotFound(err) {
err = nil
continue
@ -56,9 +56,9 @@ func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight i
return
}
if fc == (FullCommit{}) {
fc = fc_
} else if fc_.Height() > fc.Height() {
fc = fc_
fc = commit
} else if commit.Height() > fc.Height() {
fc = commit
}
if fc.Height() == maxHeight {
return
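
A generic sketch (not from the commit) of the naming rule behind fc_ becoming commit: golint rejects underscores in Go identifiers, and a descriptive replacement also documents intent.

package main

import "fmt"

func highestCommit(heights []int64) int64 {
	var best int64
	for _, h := range heights { // `h`, not `h_`
		if h > best {
			best = h
		}
	}
	return best
}

func main() {
	fmt.Println(highestCommit([]int64{3, 9, 7})) // 9
}
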


+ 16
- 13
lite/proxy/query.go

@ -24,7 +24,7 @@ func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rp
val cmn.HexBytes, height int64, proof *merkle.Proof, err error) {
if reqHeight < 0 {
err = errors.New("Height cannot be negative")
err = errors.New("height cannot be negative")
return
}
@ -54,7 +54,7 @@ func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts
// Validate the response, e.g. height.
if resp.IsErr() {
err = errors.Errorf("Query error for key %d: %d", key, resp.Code)
err = errors.Errorf("query error for key %d: %d", key, resp.Code)
return nil, err
}
@ -62,7 +62,7 @@ func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts
return nil, lerr.ErrEmptyTree()
}
if resp.Height == 0 {
return nil, errors.New("Height returned is zero")
return nil, errors.New("height returned is zero")
}
// AppHash for height H is in header H+1
@ -79,24 +79,27 @@ func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts
if err != nil {
return nil, err
}
kp := merkle.KeyPath{}
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL)
err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, kp.String(), resp.Value)
if err != nil {
return nil, errors.Wrap(err, "Couldn't verify value proof")
}
return &ctypes.ResultABCIQuery{Response: resp}, nil
} else {
// Value absent
// Validate the proof against the certified header to ensure data integrity.
// XXX How do we encode the key into a string...
err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key))
if err != nil {
return nil, errors.Wrap(err, "Couldn't verify absence proof")
return nil, errors.Wrap(err, "couldn't verify value proof")
}
return &ctypes.ResultABCIQuery{Response: resp}, nil
}
// Value absent
// Validate the proof against the certified header to ensure data integrity.
// XXX How do we encode the key into a string...
err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key))
if err != nil {
return nil, errors.Wrap(err, "couldn't verify absence proof")
}
return &ctypes.ResultABCIQuery{Response: resp}, nil
}
func parseQueryStorePath(path string) (storeName string, err error) {


+ 1
- 1
lite/proxy/query_test.go

@ -27,7 +27,7 @@ var waitForEventTimeout = 5 * time.Second
// TODO fix tests!!
func TestMain(m *testing.M) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node = rpctest.StartTendermint(app)
code := m.Run()


+ 1
- 1
lite2/provider/http/http_test.go

@ -14,7 +14,7 @@ import (
)
func TestMain(m *testing.M) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app)
code := m.Run()


+ 2
- 2
mempool/bench_test.go

@ -9,7 +9,7 @@ import (
)
func BenchmarkReap(b *testing.B) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -27,7 +27,7 @@ func BenchmarkReap(b *testing.B) {
}
func BenchmarkCheckTx(b *testing.B) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()


+ 3
- 3
mempool/cache_test.go

@ -24,19 +24,19 @@ func TestCacheRemove(t *testing.T) {
txs[i] = txBytes
cache.Push(txBytes)
// make sure it's added to both the linked list and the map
require.Equal(t, i+1, len(cache.map_))
require.Equal(t, i+1, len(cache.cacheMap))
require.Equal(t, i+1, cache.list.Len())
}
for i := 0; i < numTxs; i++ {
cache.Remove(txs[i])
// make sure it's removed from both the map and the linked list
require.Equal(t, numTxs-(i+1), len(cache.map_))
require.Equal(t, numTxs-(i+1), len(cache.cacheMap))
require.Equal(t, numTxs-(i+1), cache.list.Len())
}
}
func TestCacheAfterUpdate(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()


+ 13
- 13
mempool/clist_mempool.go

@ -638,10 +638,10 @@ type txCache interface {
// mapTxCache maintains an LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
mtx sync.Mutex
size int
map_ map[[sha256.Size]byte]*list.Element
list *list.List
mtx sync.Mutex
size int
cacheMap map[[sha256.Size]byte]*list.Element
list *list.List
}
var _ txCache = (*mapTxCache)(nil)
@ -649,16 +649,16 @@ var _ txCache = (*mapTxCache)(nil)
// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
return &mapTxCache{
size: cacheSize,
map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
list: list.New(),
size: cacheSize,
cacheMap: make(map[[sha256.Size]byte]*list.Element, cacheSize),
list: list.New(),
}
}
// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
cache.mtx.Lock()
cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
cache.cacheMap = make(map[[sha256.Size]byte]*list.Element, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
@ -671,7 +671,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool {
// Use the tx hash in the cache
txHash := txKey(tx)
if moved, exists := cache.map_[txHash]; exists {
if moved, exists := cache.cacheMap[txHash]; exists {
cache.list.MoveToBack(moved)
return false
}
@ -679,13 +679,13 @@ func (cache *mapTxCache) Push(tx types.Tx) bool {
if cache.list.Len() >= cache.size {
popped := cache.list.Front()
poppedTxHash := popped.Value.([sha256.Size]byte)
delete(cache.map_, poppedTxHash)
delete(cache.cacheMap, poppedTxHash)
if popped != nil {
cache.list.Remove(popped)
}
}
e := cache.list.PushBack(txHash)
cache.map_[txHash] = e
cache.cacheMap[txHash] = e
return true
}
@ -693,8 +693,8 @@ func (cache *mapTxCache) Push(tx types.Tx) bool {
func (cache *mapTxCache) Remove(tx types.Tx) {
cache.mtx.Lock()
txHash := txKey(tx)
popped := cache.map_[txHash]
delete(cache.map_, txHash)
popped := cache.cacheMap[txHash]
delete(cache.cacheMap, txHash)
if popped != nil {
cache.list.Remove(popped)
}
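
A reduced sketch of the cache above with the renamed field: cacheMap instead of the underscore-suffixed map_, which golint flags. It mirrors the list-plus-map shape of mapTxCache; the details are simplified and the names here are illustrative.

package main

import (
	"container/list"
	"fmt"
)

type cache struct {
	cacheMap map[string]*list.Element // was map_
	order    *list.List
}

func newCache() *cache {
	return &cache{cacheMap: make(map[string]*list.Element), order: list.New()}
}

func (c *cache) push(key string) {
	// Keep the map and the eviction list in sync, as mapTxCache does.
	c.cacheMap[key] = c.order.PushBack(key)
}

func main() {
	c := newCache()
	c.push("tx1")
	fmt.Println(c.order.Len(), len(c.cacheMap)) // 1 1
}
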


+ 10
- 10
mempool/clist_mempool_test.go

@ -90,7 +90,7 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
}
func TestReapMaxBytesMaxGas(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -139,7 +139,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
}
func TestMempoolFilters(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -178,7 +178,7 @@ func TestMempoolFilters(t *testing.T) {
}
func TestMempoolUpdate(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -213,7 +213,7 @@ func TestMempoolUpdate(t *testing.T) {
}
func TestTxsAvailable(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -257,7 +257,7 @@ func TestTxsAvailable(t *testing.T) {
}
func TestSerialReap(t *testing.T) {
app := counter.NewCounterApplication(true)
app := counter.NewApplication(true)
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
cc := proxy.NewLocalClientCreator(app)
@ -380,7 +380,7 @@ func TestMempoolCloseWAL(t *testing.T) {
// 3. Create the mempool
wcfg := cfg.DefaultConfig()
wcfg.Mempool.RootDir = rootDir
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
defer cleanup()
@ -421,7 +421,7 @@ func txMessageSize(tx types.Tx) int {
}
func TestMempoolMaxMsgSize(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempl, cleanup := newMempoolWithApp(cc)
defer cleanup()
@ -472,7 +472,7 @@ func TestMempoolMaxMsgSize(t *testing.T) {
}
func TestMempoolTxsBytes(t *testing.T) {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
config := cfg.ResetTestRoot("mempool_test")
config.Mempool.MaxTxsBytes = 10
@ -508,7 +508,7 @@ func TestMempoolTxsBytes(t *testing.T) {
}
// 6. zero after tx is rechecked and removed due to not being valid anymore
app2 := counter.NewCounterApplication(true)
app2 := counter.NewApplication(true)
cc = proxy.NewLocalClientCreator(app2)
mempool, cleanup = newMempoolWithApp(cc)
defer cleanup()
@ -543,7 +543,7 @@ func TestMempoolTxsBytes(t *testing.T) {
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc, server := newRemoteApp(t, sockPath, app)
defer server.Stop()
config := cfg.ResetTestRoot("mempool_test")


+ 1
- 1
mempool/codec.go

@ -7,5 +7,5 @@ import (
var cdc = amino.NewCodec()
func init() {
RegisterMempoolMessages(cdc)
RegisterMessages(cdc)
}
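
A sketch of the registration pattern these codec.go files share, using the renamed RegisterMessages/Message identifiers. The concrete type and route string here are illustrative, not the package's own.

package main

import amino "github.com/tendermint/go-amino"

// Message mirrors the exported interface each reactor package now defines.
type Message interface{}

type pingMessage struct{}

func RegisterMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&pingMessage{}, "example/PingMessage", nil)
}

func main() {
	cdc := amino.NewCodec()
	RegisterMessages(cdc)
	_ = cdc.MustMarshalBinaryBare(&pingMessage{})
}
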

+ 6
- 6
mempool/reactor.go

@ -257,15 +257,15 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
//-----------------------------------------------------------------------------
// Messages
// MempoolMessage is a message sent or received by the Reactor.
type MempoolMessage interface{}
// Message is a message sent or received by the Reactor.
type Message interface{}
func RegisterMempoolMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*MempoolMessage)(nil), nil)
func RegisterMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*Message)(nil), nil)
cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
}
func (memR *Reactor) decodeMsg(bz []byte) (msg MempoolMessage, err error) {
func (memR *Reactor) decodeMsg(bz []byte) (msg Message, err error) {
maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
if l := len(bz); l > maxMsgSize {
return msg, ErrTxTooLarge{maxMsgSize, l}
@ -276,7 +276,7 @@ func (memR *Reactor) decodeMsg(bz []byte) (msg MempoolMessage, err error) {
//-------------------------------------
// TxMessage is a MempoolMessage containing a transaction.
// TxMessage is a Message containing a transaction.
type TxMessage struct {
Tx types.Tx
}


+ 1
- 1
mempool/reactor_test.go

@ -46,7 +46,7 @@ func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
reactors := make([]*Reactor, n)
logger := mempoolLogger()
for i := 0; i < n; i++ {
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()


+ 5
- 5
node/id.go

@ -6,18 +6,18 @@ import (
"github.com/tendermint/tendermint/crypto"
)
type NodeID struct {
type ID struct {
Name string
PubKey crypto.PubKey
}
type PrivNodeID struct {
NodeID
ID
PrivKey crypto.PrivKey
}
type NodeGreeting struct {
NodeID
type Greeting struct {
ID
Version string
ChainID string
Message string
@ -25,7 +25,7 @@ type NodeGreeting struct {
}
type SignedNodeGreeting struct {
NodeGreeting
Greeting
Signature []byte
}
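
A self-contained sketch (local names, not the package's) of the embedding after the rename: Greeting embeds ID, so ID's fields are promoted, and the qualified type now reads node.ID rather than the stuttering node.NodeID.

package main

import "fmt"

type ID struct {
	Name string
}

type Greeting struct {
	ID // embedded; fields of ID are promoted onto Greeting
	Message string
}

func main() {
	g := Greeting{ID: ID{Name: "val-1"}, Message: "hello"}
	fmt.Println(g.Name, g.Message) // Name comes from the embedded ID
}
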


+ 27
- 27
node/node.go

@ -80,7 +80,7 @@ func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
}
// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
type Provider func(*cfg.Config, log.Logger) (*Node, error)
// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
@ -191,12 +191,12 @@ type Node struct {
bcReactor p2p.Reactor // for fast-syncing
mempoolReactor *mempl.Reactor // for gossipping transactions
mempool mempl.Mempool
consensusState *cs.ConsensusState // latest consensus state
consensusReactor *cs.ConsensusReactor // for participating in the consensus
pexReactor *pex.PEXReactor // for exchanging peer addresses
evidencePool *evidence.EvidencePool // tracking evidence
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
consensusState *cs.State // latest consensus state
consensusReactor *cs.Reactor // for participating in the consensus
pexReactor *pex.Reactor // for exchanging peer addresses
evidencePool *evidence.Pool // tracking evidence
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
txIndexer txindex.TxIndexer
indexerService *txindex.IndexerService
prometheusSrv *http.Server
@ -339,16 +339,16 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
}
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
stateDB dbm.DB, logger log.Logger) (*evidence.EvidenceReactor, *evidence.EvidencePool, error) {
stateDB dbm.DB, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
if err != nil {
return nil, nil, err
}
evidenceLogger := logger.With("module", "evidence")
evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
evidencePool := evidence.NewPool(stateDB, evidenceDB)
evidencePool.SetLogger(evidenceLogger)
evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
evidenceReactor := evidence.NewReactor(evidencePool)
evidenceReactor.SetLogger(evidenceLogger)
return evidenceReactor, evidencePool, nil
}
@ -378,14 +378,14 @@ func createConsensusReactor(config *cfg.Config,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
mempool *mempl.CListMempool,
evidencePool *evidence.EvidencePool,
evidencePool *evidence.Pool,
privValidator types.PrivValidator,
csMetrics *cs.Metrics,
fastSync bool,
eventBus *types.EventBus,
consensusLogger log.Logger) (*consensus.ConsensusReactor, *consensus.ConsensusState) {
consensusLogger log.Logger) (*consensus.Reactor, *consensus.State) {
consensusState := cs.NewConsensusState(
consensusState := cs.NewState(
config.Consensus,
state.Copy(),
blockExec,
@ -398,7 +398,7 @@ func createConsensusReactor(config *cfg.Config,
if privValidator != nil {
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
consensusReactor := cs.NewReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
consensusReactor.SetLogger(consensusLogger)
// services which will be publishing and/or subscribing for messages (events)
// consensusReactor will set it on consensusState and blockExecutor
@ -476,8 +476,8 @@ func createSwitch(config *cfg.Config,
peerFilters []p2p.PeerFilterFunc,
mempoolReactor *mempl.Reactor,
bcReactor p2p.Reactor,
consensusReactor *consensus.ConsensusReactor,
evidenceReactor *evidence.EvidenceReactor,
consensusReactor *consensus.Reactor,
evidenceReactor *evidence.Reactor,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
p2pLogger log.Logger) *p2p.Switch {
@ -529,11 +529,11 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
}
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
sw *p2p.Switch, logger log.Logger) *pex.PEXReactor {
sw *p2p.Switch, logger log.Logger) *pex.Reactor {
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewPEXReactor(addrBook,
&pex.PEXReactorConfig{
pexReactor := pex.NewReactor(addrBook,
&pex.ReactorConfig{
Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
SeedMode: config.P2P.SeedMode,
// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
@ -700,7 +700,7 @@ func NewNode(config *cfg.Config,
//
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
// Note we currently use the addrBook regardless at least for AddOurAddress
var pexReactor *pex.PEXReactor
var pexReactor *pex.Reactor
if config.P2P.PexReactor {
pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
}
@ -992,12 +992,12 @@ func (n *Node) BlockStore() *store.BlockStore {
}
// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
func (n *Node) ConsensusState() *cs.State {
return n.consensusState
}
// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
func (n *Node) ConsensusReactor() *cs.Reactor {
return n.consensusReactor
}
@ -1012,12 +1012,12 @@ func (n *Node) Mempool() mempl.Mempool {
}
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.PEXReactor {
func (n *Node) PEXReactor() *pex.Reactor {
return n.pexReactor
}
// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
func (n *Node) EvidencePool() *evidence.Pool {
return n.evidencePool
}
@ -1092,9 +1092,9 @@ func makeNodeInfo(
state.Version.Consensus.Block,
state.Version.Consensus.App,
),
ID_: nodeKey.ID(),
Network: genDoc.ChainID,
Version: version.TMCoreSemVer,
DefaultNodeID: nodeKey.ID(),
Network: genDoc.ChainID,
Version: version.TMCoreSemVer,
Channels: []byte{
bcChannel,
cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,


+ 2
- 2
node/node_test.go

@ -219,7 +219,7 @@ func testFreeAddr(t *testing.T) string {
func TestCreateProposalBlock(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.Nil(t, err)
@ -249,7 +249,7 @@ func TestCreateProposalBlock(t *testing.T) {
types.RegisterMockEvidencesGlobal() // XXX!
evidence.RegisterMockEvidences()
evidenceDB := dbm.NewMemDB()
evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
evidencePool := evidence.NewPool(stateDB, evidenceDB)
evidencePool.SetLogger(logger)
// fill the evidence pool with more evidence


+ 2
- 2
p2p/mock/peer.go

@ -45,8 +45,8 @@ func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true }
func (mp *Peer) NodeInfo() p2p.NodeInfo {
return p2p.DefaultNodeInfo{
ID_: mp.addr.ID,
ListenAddr: mp.addr.DialString(),
DefaultNodeID: mp.addr.ID,
ListenAddr: mp.addr.DialString(),
}
}
func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }


+ 6
- 6
p2p/node_info.go

@ -77,8 +77,8 @@ type DefaultNodeInfo struct {
// Authenticate
// TODO: replace with NetAddress
ID_ ID `json:"id"` // authenticated identifier
ListenAddr string `json:"listen_addr"` // accepting incoming
DefaultNodeID ID `json:"id"` // authenticated identifier
ListenAddr string `json:"listen_addr"` // accepting incoming
// Check compatibility.
// Channels are HexBytes so easier to read as JSON
@ -99,7 +99,7 @@ type DefaultNodeInfoOther struct {
// ID returns the node's peer ID.
func (info DefaultNodeInfo) ID() ID {
return info.ID_
return info.DefaultNodeID
}
// Validate checks the self-reported DefaultNodeInfo is safe.
@ -172,10 +172,10 @@ func (info DefaultNodeInfo) Validate() error {
// CompatibleWith checks if two DefaultNodeInfo are compatible with each other.
// CONTRACT: two nodes are compatible if the Block version and network match
// and they have at least one channel in common.
func (info DefaultNodeInfo) CompatibleWith(other_ NodeInfo) error {
other, ok := other_.(DefaultNodeInfo)
func (info DefaultNodeInfo) CompatibleWith(otherInfo NodeInfo) error {
other, ok := otherInfo.(DefaultNodeInfo)
if !ok {
return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(other_))
return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(otherInfo))
}
if info.ProtocolVersion.Block != other.ProtocolVersion.Block {
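
A sketch of why the ID_ to DefaultNodeID rename is wire-safe: the struct tag, not the Go field name, determines the serialized key, so "id" is unchanged on the wire. This example uses encoding/json for brevity; the real struct goes through the amino codec, which honors the same tags.

package main

import (
	"encoding/json"
	"fmt"
)

type DefaultNodeInfo struct {
	DefaultNodeID string `json:"id"` // was ID_; the JSON key stays "id"
	ListenAddr    string `json:"listen_addr"`
}

func main() {
	b, _ := json.Marshal(DefaultNodeInfo{DefaultNodeID: "deadbeef", ListenAddr: "0.0.0.0:26656"})
	fmt.Println(string(b)) // {"id":"deadbeef","listen_addr":"0.0.0.0:26656"}
}
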


+ 1
- 1
p2p/node_info_test.go

@ -114,7 +114,7 @@ func TestNodeInfoCompatible(t *testing.T) {
testName string
malleateNodeInfo func(*DefaultNodeInfo)
}{
{"Wrong block version", func(ni *DefaultNodeInfo) { ni.ProtocolVersion.Block += 1 }},
{"Wrong block version", func(ni *DefaultNodeInfo) { ni.ProtocolVersion.Block++ }},
{"Wrong network", func(ni *DefaultNodeInfo) { ni.Network += "-wrong" }},
{"No common channels", func(ni *DefaultNodeInfo) { ni.Channels = []byte{newTestChannel} }},
}


+ 1
- 1
p2p/peer_test.go

@ -220,7 +220,7 @@ func (rp *remotePeer) accept() {
func (rp *remotePeer) nodeInfo() NodeInfo {
return DefaultNodeInfo{
ProtocolVersion: defaultProtocolVersion,
ID_: rp.Addr().ID,
DefaultNodeID: rp.Addr().ID,
ListenAddr: rp.listener.Addr().String(),
Network: "testing",
Version: "1.2.3-rc0-deadbeef",


+ 1
- 1
p2p/pex/codec.go

@ -7,5 +7,5 @@ import (
var cdc *amino.Codec = amino.NewCodec()
func init() {
RegisterPexMessage(cdc)
RegisterMessages(cdc)
}

+ 40
- 40
p2p/pex/pex_reactor.go

@ -66,7 +66,7 @@ func (e errTooEarlyToDial) Error() string {
e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
}
// PEXReactor handles PEX (peer exchange) and ensures that an
// Reactor handles PEX (peer exchange) and ensures that an
// adequate number of peers are connected to the switch.
//
// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
@ -75,11 +75,11 @@ func (e errTooEarlyToDial) Error() string {
//
// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
type PEXReactor struct {
type Reactor struct {
p2p.BaseReactor
book AddrBook
config *PEXReactorConfig
config *ReactorConfig
ensurePeersPeriod time.Duration // TODO: should go in the config
// maps to prevent abuse
@ -94,14 +94,14 @@ type PEXReactor struct {
crawlPeerInfos map[p2p.ID]crawlPeerInfo
}
func (r *PEXReactor) minReceiveRequestInterval() time.Duration {
func (r *Reactor) minReceiveRequestInterval() time.Duration {
// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
// peers too quickly from others and they'll think we're bad!
return r.ensurePeersPeriod / 3
}
// PEXReactorConfig holds reactor specific configuration data.
type PEXReactorConfig struct {
// ReactorConfig holds reactor specific configuration data.
type ReactorConfig struct {
// Seed/Crawler mode
SeedMode bool
@ -123,9 +123,9 @@ type _attemptsToDial struct {
lastDialed time.Time
}
// NewPEXReactor creates a new PEX reactor.
func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
r := &PEXReactor{
// NewReactor creates a new PEX reactor.
func NewReactor(b AddrBook, config *ReactorConfig) *Reactor {
r := &Reactor{
book: b,
config: config,
ensurePeersPeriod: defaultEnsurePeersPeriod,
@ -133,12 +133,12 @@ func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
lastReceivedRequests: cmn.NewCMap(),
crawlPeerInfos: make(map[p2p.ID]crawlPeerInfo),
}
r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
r.BaseReactor = *p2p.NewBaseReactor("Reactor", r)
return r
}
// OnStart implements BaseService
func (r *PEXReactor) OnStart() error {
func (r *Reactor) OnStart() error {
err := r.book.Start()
if err != nil && err != cmn.ErrAlreadyStarted {
return err
@ -164,12 +164,12 @@ func (r *PEXReactor) OnStart() error {
}
// OnStop implements BaseService
func (r *PEXReactor) OnStop() {
func (r *Reactor) OnStop() {
r.book.Stop()
}
// GetChannels implements Reactor
func (r *PEXReactor) GetChannels() []*conn.ChannelDescriptor {
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
return []*conn.ChannelDescriptor{
{
ID: PexChannel,
@ -181,7 +181,7 @@ func (r *PEXReactor) GetChannels() []*conn.ChannelDescriptor {
// AddPeer implements Reactor by adding peer to the address book (if inbound)
// or by requesting more addresses (if outbound).
func (r *PEXReactor) AddPeer(p Peer) {
func (r *Reactor) AddPeer(p Peer) {
if p.IsOutbound() {
// For outbound peers, the address is already in the books -
// either via DialPeersAsync or r.Receive.
@ -208,13 +208,13 @@ func (r *PEXReactor) AddPeer(p Peer) {
}
// RemovePeer implements Reactor by resetting peer's requests info.
func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
func (r *Reactor) RemovePeer(p Peer, reason interface{}) {
id := string(p.ID())
r.requestsSent.Delete(id)
r.lastReceivedRequests.Delete(id)
}
func (r *PEXReactor) logErrAddrBook(err error) {
func (r *Reactor) logErrAddrBook(err error) {
if err != nil {
switch err.(type) {
case ErrAddrBookNilAddr:
@ -227,7 +227,7 @@ func (r *PEXReactor) logErrAddrBook(err error) {
}
// Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
if err != nil {
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@ -285,7 +285,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
}
// enforces a minimum amount of time between requests
func (r *PEXReactor) receiveRequest(src Peer) error {
func (r *Reactor) receiveRequest(src Peer) error {
id := string(src.ID())
v := r.lastReceivedRequests.Get(id)
if v == nil {
@ -320,7 +320,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
// RequestAddrs asks peer for more addresses if we do not already have a
// request out for this peer.
func (r *PEXReactor) RequestAddrs(p Peer) {
func (r *Reactor) RequestAddrs(p Peer) {
id := string(p.ID())
if r.requestsSent.Has(id) {
return
@ -333,7 +333,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
// ReceiveAddrs adds the given addrs to the addrbook if there's an open
// request for this peer and deletes the open request.
// If there's no open request for the src peer, it returns an error.
func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
id := string(src.ID())
if !r.requestsSent.Has(id) {
return errors.New("unsolicited pexAddrsMessage")
@ -385,17 +385,17 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
}
// SendAddrs sends addrs to the peer.
func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs}))
}
// SetEnsurePeersPeriod sets period to ensure peers connected.
func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) {
r.ensurePeersPeriod = d
}
// Ensures that sufficient peers are connected. (continuous)
func (r *PEXReactor) ensurePeersRoutine() {
func (r *Reactor) ensurePeersRoutine() {
var (
seed = cmn.NewRand()
jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
@ -430,7 +430,7 @@ func (r *PEXReactor) ensurePeersRoutine() {
// heuristic that we haven't perfected yet, or, perhaps is manually edited by
// the node operator. It should not be used to compute what addresses are
// already connected or not.
func (r *PEXReactor) ensurePeers() {
func (r *Reactor) ensurePeers() {
var (
out, in, dial = r.Switch.NumPeers()
numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial)
@ -509,7 +509,7 @@ func (r *PEXReactor) ensurePeers() {
}
}
func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) {
func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) {
_attempts, ok := r.attemptsToDial.Load(addr.DialString())
if !ok {
return
@ -518,7 +518,7 @@ func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastD
return atd.number, atd.lastDialed
}
func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) error {
func (r *Reactor) dialPeer(addr *p2p.NetAddress) error {
attempts, lastDialed := r.dialAttemptsInfo(addr)
if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial {
// TODO(melekes): have a blacklist in the addrbook with peers whom we've
@ -563,7 +563,7 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) error {
}
// maxBackoffDurationForPeer caps the backoff duration for persistent peers.
func (r *PEXReactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration {
func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration {
if r.config.PersistentPeersMaxDialPeriod > 0 &&
planned > r.config.PersistentPeersMaxDialPeriod &&
r.Switch.IsPeerPersistent(addr) {
@ -577,7 +577,7 @@ func (r *PEXReactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned tim
// return err if user provided any badly formatted seed addresses.
// Doesn't error if the seed node can't be reached.
// numOnline returns -1 if no seed nodes were in the initial configuration.
func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) {
func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) {
lSeeds := len(r.config.Seeds)
if lSeeds == 0 {
return -1, nil, nil
@ -596,7 +596,7 @@ func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, er
}
// randomly dial seeds until we connect to one or exhaust them
func (r *PEXReactor) dialSeeds() {
func (r *Reactor) dialSeeds() {
perm := cmn.RandPerm(len(r.seedAddrs))
// perm := r.Switch.rng.Perm(lSeeds)
for _, i := range perm {
@ -618,7 +618,7 @@ func (r *PEXReactor) dialSeeds() {
// AttemptsToDial returns the number of attempts to dial specific address. It
// returns 0 if never attempted or successfully connected.
func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int {
lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
if attempted {
return lAttempts.(_attemptsToDial).number
@ -631,7 +631,7 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
// Explores the network searching for more peers. (continuous)
// Seed/Crawler Mode causes this node to quickly disconnect
// from peers, except other seed nodes.
func (r *PEXReactor) crawlPeersRoutine() {
func (r *Reactor) crawlPeersRoutine() {
// If we have any seed nodes, consult them first
if len(r.seedAddrs) > 0 {
r.dialSeeds()
@ -657,7 +657,7 @@ func (r *PEXReactor) crawlPeersRoutine() {
// nodeHasSomePeersOrDialingAny returns true if the node is connected to some
// peers or dialing them currently.
func (r *PEXReactor) nodeHasSomePeersOrDialingAny() bool {
func (r *Reactor) nodeHasSomePeersOrDialingAny() bool {
out, in, dial := r.Switch.NumPeers()
return out+in+dial > 0
}
@ -671,7 +671,7 @@ type crawlPeerInfo struct {
}
// crawlPeers will crawl the network looking for new peer addresses.
func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) {
func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) {
now := time.Now()
for _, addr := range addrs {
@ -706,7 +706,7 @@ func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) {
}
}
func (r *PEXReactor) cleanupCrawlPeerInfos() {
func (r *Reactor) cleanupCrawlPeerInfos() {
for id, info := range r.crawlPeerInfos {
// If we did not crawl a peer for 24 hours, it means the peer was removed
// from the addrbook => remove
@ -721,7 +721,7 @@ func (r *PEXReactor) cleanupCrawlPeerInfos() {
}
// attemptDisconnects checks if we've been with each peer long enough to disconnect
func (r *PEXReactor) attemptDisconnects() {
func (r *Reactor) attemptDisconnects() {
for _, peer := range r.Switch.Peers().List() {
if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod {
continue
@ -746,17 +746,17 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) {
//-----------------------------------------------------------------------------
// Messages
// PexMessage is a primary type for PEX messages. Underneath, it could contain
// Message is a primary type for PEX messages. Underneath, it could contain
// either pexRequestMessage, or pexAddrsMessage messages.
type PexMessage interface{}
type Message interface{}
func RegisterPexMessage(cdc *amino.Codec) {
cdc.RegisterInterface((*PexMessage)(nil), nil)
func RegisterMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*Message)(nil), nil)
cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil)
cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil)
}
func decodeMsg(bz []byte) (msg PexMessage, err error) {
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}


+ 24
- 24
p2p/pex/pex_reactor_test.go

@ -28,7 +28,7 @@ func init() {
}
func TestPEXReactorBasic(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
r, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
assert.NotNil(t, r)
@ -36,7 +36,7 @@ func TestPEXReactorBasic(t *testing.T) {
}
func TestPEXReactorAddRemovePeer(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
r, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
size := book.Size()
@ -86,7 +86,7 @@ func TestPEXReactorRunning(t *testing.T) {
sw.SetLogger(logger.With("pex", i))
r := NewPEXReactor(books[i], &PEXReactorConfig{})
r := NewReactor(books[i], &ReactorConfig{})
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
@ -118,7 +118,7 @@ func TestPEXReactorRunning(t *testing.T) {
}
func TestPEXReactorReceive(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
r, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
peer := p2p.CreateRandomPeer(false)
@ -137,7 +137,7 @@ func TestPEXReactorReceive(t *testing.T) {
}
func TestPEXReactorRequestMessageAbuse(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
r, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
sw := createSwitchAndAddReactors(r)
@ -167,7 +167,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
}
func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
r, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
sw := createSwitchAndAddReactors(r)
@ -217,7 +217,7 @@ func TestCheckSeeds(t *testing.T) {
peerSwitch.Stop()
// 4. test create peer with all seeds having unresolvable DNS fails
badPeerConfig := &PEXReactorConfig{
badPeerConfig := &ReactorConfig{
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"},
}
@ -226,7 +226,7 @@ func TestCheckSeeds(t *testing.T) {
peerSwitch.Stop()
// 5. test create peer with one good seed address succeeds
badPeerConfig = &PEXReactorConfig{
badPeerConfig = &ReactorConfig{
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
seed.NetAddress().String()},
@ -291,7 +291,7 @@ func TestPEXReactorSeedMode(t *testing.T) {
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}
pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}
pexR, book := createReactor(pexRConfig)
defer teardownReactor(book)
@ -330,7 +330,7 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond}
pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond}
pexR, book := createReactor(pexRConfig)
defer teardownReactor(book)
@ -368,7 +368,7 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) {
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexR, book := createReactor(&PEXReactorConfig{SeedMode: true})
pexR, book := createReactor(&ReactorConfig{SeedMode: true})
defer teardownReactor(book)
sw := createSwitchAndAddReactors(pexR)
@ -416,12 +416,12 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
sw.SetLogger(logger.With("pex", i))
config := &PEXReactorConfig{}
config := &ReactorConfig{}
if i == 0 {
// first one is a seed node
config = &PEXReactorConfig{SeedMode: true}
config = &ReactorConfig{SeedMode: true}
}
r := NewPEXReactor(books[i], config)
r := NewReactor(books[i], config)
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
@ -435,7 +435,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
require.Nil(t, err)
}
reactor := switches[0].Reactors()["pex"].(*PEXReactor)
reactor := switches[0].Reactors()["pex"].(*Reactor)
peerID := switches[1].NodeInfo().ID()
err = switches[1].DialPeerWithAddress(switches[0].NetAddress())
@ -468,7 +468,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
peer := p2p.CreateRandomPeer(false)
pexR, book := createReactor(&PEXReactorConfig{})
pexR, book := createReactor(&ReactorConfig{})
book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())})
defer teardownReactor(book)
@ -486,7 +486,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
}
func TestPEXReactorDialPeer(t *testing.T) {
pexR, book := createReactor(&PEXReactorConfig{})
pexR, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)
sw := createSwitchAndAddReactors(pexR)
@ -564,7 +564,7 @@ func assertPeersWithTimeout(
}
// Creates a peer with the provided config
func testCreatePeerWithConfig(dir string, id int, config *PEXReactorConfig) *p2p.Switch {
func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch {
peer := p2p.MakeSwitch(
cfg,
id,
@ -577,7 +577,7 @@ func testCreatePeerWithConfig(dir string, id int, config *PEXReactorConfig) *p2p
sw.SetLogger(log.TestingLogger())
r := NewPEXReactor(
r := NewReactor(
book,
config,
)
@ -591,7 +591,7 @@ func testCreatePeerWithConfig(dir string, id int, config *PEXReactorConfig) *p2p
// Creates a peer with the default config
func testCreateDefaultPeer(dir string, id int) *p2p.Switch {
return testCreatePeerWithConfig(dir, id, &PEXReactorConfig{})
return testCreatePeerWithConfig(dir, id, &ReactorConfig{})
}
// Creates a seed which knows about the provided addresses / source address pairs.
@ -613,7 +613,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
sw.SetLogger(log.TestingLogger())
r := NewPEXReactor(book, &PEXReactorConfig{})
r := NewReactor(book, &ReactorConfig{})
r.SetLogger(log.TestingLogger())
sw.AddReactor("pex", r)
return sw
@ -625,13 +625,13 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
// Creates a peer which knows about the provided seed.
// Starting and stopping the peer is left to the caller
func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch {
conf := &PEXReactorConfig{
conf := &ReactorConfig{
Seeds: []string{seed.NetAddress().String()},
}
return testCreatePeerWithConfig(dir, id, conf)
}
func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
func createReactor(conf *ReactorConfig) (r *Reactor, book *addrBook) {
// directory to store address book
dir, err := ioutil.TempDir("", "pex_reactor")
if err != nil {
@ -640,7 +640,7 @@ func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true)
book.SetLogger(log.TestingLogger())
r = NewPEXReactor(book, conf)
r = NewReactor(book, conf)
r.SetLogger(log.TestingLogger())
return
}


+ 1
- 1
p2p/switch_test.go

@ -207,7 +207,7 @@ func TestSwitchPeerFilter(t *testing.T) {
var (
filters = []PeerFilterFunc{
func(_ IPeerSet, _ Peer) error { return nil },
func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied!") },
func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") },
func(_ IPeerSet, _ Peer) error { return nil },
}
sw = MakeSwitch(


+ 1
- 1
p2p/test_util.go

@ -261,7 +261,7 @@ func testNodeInfo(id ID, name string) NodeInfo {
func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo {
return DefaultNodeInfo{
ProtocolVersion: defaultProtocolVersion,
ID_: id,
DefaultNodeID: id,
ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()),
Network: network,
Version: "1.2.3-rc0-deadbeef",


+ 5
- 5
p2p/trust/config.go

@ -2,8 +2,8 @@ package trust
import "time"
// TrustMetricConfig - Configures the weight functions and time intervals for the metric
type TrustMetricConfig struct {
// MetricConfig - Configures the weight functions and time intervals for the metric
type MetricConfig struct {
// Determines the percentage given to current behavior
ProportionalWeight float64
@ -21,8 +21,8 @@ type TrustMetricConfig struct {
}
// DefaultConfig returns a config with values that have been tested and produce desirable results
func DefaultConfig() TrustMetricConfig {
return TrustMetricConfig{
func DefaultConfig() MetricConfig {
return MetricConfig{
ProportionalWeight: 0.4,
IntegralWeight: 0.6,
TrackingWindow: (time.Minute * 60 * 24) * 14, // 14 days.
@ -31,7 +31,7 @@ func DefaultConfig() TrustMetricConfig {
}
// Ensures that all configuration elements have valid values
func customConfig(tmc TrustMetricConfig) TrustMetricConfig {
func customConfig(tmc MetricConfig) MetricConfig {
config := DefaultConfig()
// Check the config for set values, and setup appropriately


+ 29
- 29
p2p/trust/metric.go

@ -30,9 +30,9 @@ type MetricHistoryJSON struct {
History []float64 `json:"history"`
}
// TrustMetric - keeps track of peer reliability
// Metric - keeps track of peer reliability
// See tendermint/docs/architecture/adr-006-trust-metric.md for details
type TrustMetric struct {
type Metric struct {
cmn.BaseService
// Mutex that protects the metric from concurrent access
@ -83,14 +83,14 @@ type TrustMetric struct {
// NewMetric returns a trust metric with the default configuration.
// Use Start to begin tracking the quality of peer behavior over time
func NewMetric() *TrustMetric {
func NewMetric() *Metric {
return NewMetricWithConfig(DefaultConfig())
}
// NewMetricWithConfig returns a trust metric with a custom configuration.
// Use Start to begin tracking the quality of peer behavior over time
func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {
tm := new(TrustMetric)
func NewMetricWithConfig(tmc MetricConfig) *Metric {
tm := new(Metric)
config := customConfig(tmc)
// Setup using the configuration values
@ -104,12 +104,12 @@ func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {
// This metric has a perfect history so far
tm.historyValue = 1.0
tm.BaseService = *cmn.NewBaseService(nil, "TrustMetric", tm)
tm.BaseService = *cmn.NewBaseService(nil, "Metric", tm)
return tm
}
// OnStart implements Service
func (tm *TrustMetric) OnStart() error {
func (tm *Metric) OnStart() error {
if err := tm.BaseService.OnStart(); err != nil {
return err
}
@ -119,10 +119,10 @@ func (tm *TrustMetric) OnStart() error {
// OnStop implements Service
// Nothing to do since the goroutine shuts down by itself via BaseService.Quit()
func (tm *TrustMetric) OnStop() {}
func (tm *Metric) OnStop() {}
// Returns a snapshot of the trust metric history data
func (tm *TrustMetric) HistoryJSON() MetricHistoryJSON {
func (tm *Metric) HistoryJSON() MetricHistoryJSON {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -135,7 +135,7 @@ func (tm *TrustMetric) HistoryJSON() MetricHistoryJSON {
// Instantiates a trust metric by loading the history data for a single peer.
// This is called only once and only right after creation, which is why the
// lock is not held while accessing the trust metric struct members
func (tm *TrustMetric) Init(hist MetricHistoryJSON) {
func (tm *Metric) Init(hist MetricHistoryJSON) {
// Restore the number of time intervals we have previously tracked
if hist.NumIntervals > tm.maxIntervals {
hist.NumIntervals = tm.maxIntervals
@ -164,7 +164,7 @@ func (tm *TrustMetric) Init(hist MetricHistoryJSON) {
// Pause tells the metric to pause recording data over time intervals.
// All method calls that indicate events will unpause the metric
func (tm *TrustMetric) Pause() {
func (tm *Metric) Pause() {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -173,7 +173,7 @@ func (tm *TrustMetric) Pause() {
}
// BadEvents indicates that one or more undesirable events took place
func (tm *TrustMetric) BadEvents(num int) {
func (tm *Metric) BadEvents(num int) {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -182,7 +182,7 @@ func (tm *TrustMetric) BadEvents(num int) {
}
// GoodEvents indicates that one or more desirable events took place
func (tm *TrustMetric) GoodEvents(num int) {
func (tm *Metric) GoodEvents(num int) {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -191,7 +191,7 @@ func (tm *TrustMetric) GoodEvents(num int) {
}
// TrustValue gets the dependable trust value; always between 0 and 1
func (tm *TrustMetric) TrustValue() float64 {
func (tm *Metric) TrustValue() float64 {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -199,14 +199,14 @@ func (tm *TrustMetric) TrustValue() float64 {
}
// TrustScore gets a score based on the trust value always between 0 and 100
func (tm *TrustMetric) TrustScore() int {
func (tm *Metric) TrustScore() int {
score := tm.TrustValue() * 100
return int(math.Floor(score))
}
// NextTimeInterval saves current time interval data and prepares for the following interval
func (tm *TrustMetric) NextTimeInterval() {
func (tm *Metric) NextTimeInterval() {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -245,9 +245,9 @@ func (tm *TrustMetric) NextTimeInterval() {
}
// SetTicker allows a TestTicker to be provided that will manually control
// the passing of time from the perspective of the TrustMetric.
// the passing of time from the perspective of the Metric.
// The ticker must be set before Start is called on the metric
func (tm *TrustMetric) SetTicker(ticker MetricTicker) {
func (tm *Metric) SetTicker(ticker MetricTicker) {
tm.mtx.Lock()
defer tm.mtx.Unlock()
@ -255,7 +255,7 @@ func (tm *TrustMetric) SetTicker(ticker MetricTicker) {
}
// Copy returns a new trust metric with members containing the same values
func (tm *TrustMetric) Copy() *TrustMetric {
func (tm *Metric) Copy() *Metric {
if tm == nil {
return nil
}
@ -263,7 +263,7 @@ func (tm *TrustMetric) Copy() *TrustMetric {
tm.mtx.Lock()
defer tm.mtx.Unlock()
return &TrustMetric{
return &Metric{
proportionalWeight: tm.proportionalWeight,
integralWeight: tm.integralWeight,
numIntervals: tm.numIntervals,
@ -285,7 +285,7 @@ func (tm *TrustMetric) Copy() *TrustMetric {
/* Private methods */
// This method is for a goroutine that handles all requests on the metric
func (tm *TrustMetric) processRequests() {
func (tm *Metric) processRequests() {
t := tm.testTicker
if t == nil {
// No test ticker was provided, so we create a normal ticker
@ -308,7 +308,7 @@ loop:
// Wakes the trust metric up if it is currently paused
// This method needs to be called with the mutex locked
func (tm *TrustMetric) unpause() {
func (tm *Metric) unpause() {
// Check if this is the first experience with
// what we are tracking since being paused
if tm.paused {
@ -320,7 +320,7 @@ func (tm *TrustMetric) unpause() {
}
// Calculates the trust value for the request processing
func (tm *TrustMetric) calcTrustValue() float64 {
func (tm *Metric) calcTrustValue() float64 {
weightedP := tm.proportionalWeight * tm.proportionalValue()
weightedI := tm.integralWeight * tm.historyValue
weightedD := tm.weightedDerivative()
@ -334,7 +334,7 @@ func (tm *TrustMetric) calcTrustValue() float64 {
}
// Calculates the current score for good/bad experiences
func (tm *TrustMetric) proportionalValue() float64 {
func (tm *Metric) proportionalValue() float64 {
value := 1.0
total := tm.good + tm.bad
@ -345,7 +345,7 @@ func (tm *TrustMetric) proportionalValue() float64 {
}
// Strengthens the derivative component when the change is negative
func (tm *TrustMetric) weightedDerivative() float64 {
func (tm *Metric) weightedDerivative() float64 {
var weight float64 = defaultDerivativeGamma1
d := tm.derivativeValue()
@ -356,12 +356,12 @@ func (tm *TrustMetric) weightedDerivative() float64 {
}
// Calculates the derivative component
func (tm *TrustMetric) derivativeValue() float64 {
func (tm *Metric) derivativeValue() float64 {
return tm.proportionalValue() - tm.historyValue
}
// Calculates the integral (history) component of the trust value
func (tm *TrustMetric) calcHistoryValue() float64 {
func (tm *Metric) calcHistoryValue() float64 {
var hv float64
for i := 0; i < tm.numIntervals; i++ {
@ -372,7 +372,7 @@ func (tm *TrustMetric) calcHistoryValue() float64 {
}
// Retrieves the actual history data value that represents the requested time interval
func (tm *TrustMetric) fadedMemoryValue(interval int) float64 {
func (tm *Metric) fadedMemoryValue(interval int) float64 {
first := tm.historySize - 1
if interval == 0 {
@ -387,7 +387,7 @@ func (tm *TrustMetric) fadedMemoryValue(interval int) float64 {
// Performs the update for our Faded Memories process, which allows the
// trust metric tracking window to be large while maintaining a small
// number of history data values
func (tm *TrustMetric) updateFadedMemory() {
func (tm *Metric) updateFadedMemory() {
if tm.historySize < 2 {
return
}
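
The Faded Memories comment above is what lets historySize stay small: each older slot stands for exponentially more raw intervals, so h slots can cover on the order of 2^(h-1) intervals. The folding rule below is an illustrative simplification, not the package's exact update:

package main

import "fmt"

// fade folds the newest interval in at slot 0 and averages older slots with
// their neighbors, halving time resolution as data ages.
func fade(history []float64, newest float64) []float64 {
	for i := len(history) - 1; i > 0; i-- {
		history[i] = (history[i] + history[i-1]) / 2
	}
	history[0] = newest
	return history
}

func main() {
	fmt.Println(fade([]float64{0.9, 0.7, 0.5}, 1.0)) // [1 0.8 0.6]
}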


+ 2
- 2
p2p/trust/metric_test.go View File

@ -26,7 +26,7 @@ func TestTrustMetricScores(t *testing.T) {
func TestTrustMetricConfig(t *testing.T) {
// 7 days
window := time.Minute * 60 * 24 * 7
config := TrustMetricConfig{
config := MetricConfig{
TrackingWindow: window,
IntervalLength: 2 * time.Minute,
}
@ -57,7 +57,7 @@ func TestTrustMetricConfig(t *testing.T) {
}
func TestTrustMetricCopyNilPointer(t *testing.T) {
var tm *TrustMetric
var tm *Metric
ctm := tm.Copy()


+ 19
- 19
p2p/trust/store.go View File

@ -17,12 +17,12 @@ const defaultStorePeriodicSaveInterval = 1 * time.Minute
var trustMetricKey = []byte("trustMetricStore")
// TrustMetricStore - Manages all trust metrics for peers
type TrustMetricStore struct {
// MetricStore - Manages all trust metrics for peers
type MetricStore struct {
cmn.BaseService
// Maps a Peer.Key to that peer's TrustMetric
peerMetrics map[string]*TrustMetric
peerMetrics map[string]*Metric
// Mutex that protects the map and history data file
mtx sync.Mutex
@ -31,25 +31,25 @@ type TrustMetricStore struct {
db dbm.DB
// This configuration will be used when creating new TrustMetrics
config TrustMetricConfig
config MetricConfig
}
// NewTrustMetricStore returns a store that saves data to the DB
// and uses the config when creating new trust metrics.
// Use Start to initialize the trust metric store
func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {
tms := &TrustMetricStore{
peerMetrics: make(map[string]*TrustMetric),
func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore {
tms := &MetricStore{
peerMetrics: make(map[string]*Metric),
db: db,
config: tmc,
}
tms.BaseService = *cmn.NewBaseService(nil, "TrustMetricStore", tms)
tms.BaseService = *cmn.NewBaseService(nil, "MetricStore", tms)
return tms
}
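
Putting the renamed store API together, a usage sketch. DefaultConfig and the create-on-first-use behavior of GetPeerTrustMetric are assumptions about the package, not shown in these hunks:

package main

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/p2p/trust"
)

func main() {
	db := dbm.NewDB("", "memdb", "") // in-memory DB, as in store_test.go below
	store := trust.NewTrustMetricStore(db, trust.DefaultConfig())
	if err := store.Start(); err != nil {
		panic(err)
	}
	defer store.Stop()

	metric := store.GetPeerTrustMetric("peer-key") // assumed to create on first use
	_ = metric
	store.PeerDisconnected("peer-key") // pauses that peer's metric
}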
// OnStart implements Service
func (tms *TrustMetricStore) OnStart() error {
func (tms *MetricStore) OnStart() error {
if err := tms.BaseService.OnStart(); err != nil {
return err
}
@ -63,7 +63,7 @@ func (tms *TrustMetricStore) OnStart() error {
}
// OnStop implements Service
func (tms *TrustMetricStore) OnStop() {
func (tms *MetricStore) OnStop() {
tms.BaseService.OnStop()
tms.mtx.Lock()
@ -79,7 +79,7 @@ func (tms *TrustMetricStore) OnStop() {
}
// Size returns the number of entries in the trust metric store
func (tms *TrustMetricStore) Size() int {
func (tms *MetricStore) Size() int {
tms.mtx.Lock()
defer tms.mtx.Unlock()
@ -88,7 +88,7 @@ func (tms *TrustMetricStore) Size() int {
// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key.
// The caller is expected to call Start on the TrustMetric being added
func (tms *TrustMetricStore) AddPeerTrustMetric(key string, tm *TrustMetric) {
func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) {
tms.mtx.Lock()
defer tms.mtx.Unlock()
@ -99,7 +99,7 @@ func (tms *TrustMetricStore) AddPeerTrustMetric(key string, tm *TrustMetric) {
}
// GetPeerTrustMetric returns a trust metric by peer key
func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {
func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric {
tms.mtx.Lock()
defer tms.mtx.Unlock()
@ -115,7 +115,7 @@ func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {
}
// PeerDisconnected pauses the trust metric associated with the peer identified by the key
func (tms *TrustMetricStore) PeerDisconnected(key string) {
func (tms *MetricStore) PeerDisconnected(key string) {
tms.mtx.Lock()
defer tms.mtx.Unlock()
@ -127,7 +127,7 @@ func (tms *TrustMetricStore) PeerDisconnected(key string) {
// Saves the history data for all peers to the store DB.
// This public method acquires the trust metric store lock
func (tms *TrustMetricStore) SaveToDB() {
func (tms *MetricStore) SaveToDB() {
tms.mtx.Lock()
defer tms.mtx.Unlock()
@ -137,7 +137,7 @@ func (tms *TrustMetricStore) SaveToDB() {
/* Private methods */
// size returns the number of entries in the store without acquiring the mutex
func (tms *TrustMetricStore) size() int {
func (tms *MetricStore) size() int {
return len(tms.peerMetrics)
}
@ -146,7 +146,7 @@ func (tms *TrustMetricStore) size() int {
// Loads the history data for all peers from the store DB
// cmn.Panics if file is corrupt
func (tms *TrustMetricStore) loadFromDB() bool {
func (tms *MetricStore) loadFromDB() bool {
// Obtain the history data we have so far
bytes := tms.db.Get(trustMetricKey)
if bytes == nil {
@ -173,7 +173,7 @@ func (tms *TrustMetricStore) loadFromDB() bool {
}
// Saves the history data for all peers to the store DB
func (tms *TrustMetricStore) saveToDB() {
func (tms *MetricStore) saveToDB() {
tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size())
peers := make(map[string]MetricHistoryJSON)
@ -193,7 +193,7 @@ func (tms *TrustMetricStore) saveToDB() {
}
// Periodically saves the trust history data to the DB
func (tms *TrustMetricStore) saveRoutine() {
func (tms *MetricStore) saveRoutine() {
t := time.NewTicker(defaultStorePeriodicSaveInterval)
defer t.Stop()
loop:


+ 1
- 1
p2p/trust/store_test.go View File

@ -80,7 +80,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
func TestTrustMetricStoreConfig(t *testing.T) {
historyDB := dbm.NewDB("", "memdb", "")
config := TrustMetricConfig{
config := MetricConfig{
ProportionalWeight: 0.5,
IntegralWeight: 0.5,
}


+ 2
- 2
p2p/upnp/probe.go View File

@ -8,7 +8,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
)
type UPNPCapabilities struct {
type Capabilities struct {
PortMapping bool
Hairpin bool
}
@ -81,7 +81,7 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp
return supportsHairpin
}
func Probe(logger log.Logger) (caps UPNPCapabilities, err error) {
func Probe(logger log.Logger) (caps Capabilities, err error) {
logger.Info("Probing for UPnP!")
intPort, extPort := 8001, 8001
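
With the rename, callers of Probe receive upnp.Capabilities. A small usage sketch against the signature shown above:

package main

import (
	"fmt"
	"os"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p/upnp"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	caps, err := upnp.Probe(logger)
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Printf("port mapping: %v, hairpin: %v\n", caps.PortMapping, caps.Hairpin)
}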


+ 4
- 4
p2p/upnp/upnp.go View File

@ -132,7 +132,7 @@ type ExternalIPAddress struct {
IP string
}
type UPNPService struct {
type Service struct {
ServiceType string `xml:"serviceType"`
ControlURL string `xml:"controlURL"`
}
@ -142,7 +142,7 @@ type DeviceList struct {
}
type ServiceList struct {
Service []UPNPService `xml:"service"`
Service []Service `xml:"service"`
}
type Device struct {
@ -166,7 +166,7 @@ func getChildDevice(d *Device, deviceType string) *Device {
return nil
}
func getChildService(d *Device, serviceType string) *UPNPService {
func getChildService(d *Device, serviceType string) *Service {
sl := d.ServiceList.Service
for i := 0; i < len(sl); i++ {
if strings.Contains(sl[i].ServiceType, serviceType) {
@ -378,7 +378,7 @@ func (n *upnpNAT) AddPortMapping(
// fmt.Println(string(body), err)
mappedExternalPort = externalPort
_ = response
return
return mappedExternalPort, err
}
func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
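
The AddPortMapping hunk above swaps a naked return for explicit values, a common lint cleanup with named results: both compile, but the explicit form keeps the data flow visible. In miniature:

package main

import "fmt"

func mapPort(external int) (mappedExternalPort int, err error) {
	mappedExternalPort = external
	return mappedExternalPort, err // explicit, rather than a bare `return`
}

func main() {
	p, err := mapPort(8001)
	fmt.Println(p, err) // 8001 <nil>
}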


+ 3
- 3
proxy/app_conn_test.go View File

@ -50,7 +50,7 @@ func TestEcho(t *testing.T) {
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication())
s := server.NewSocketServer(sockPath, kvstore.NewApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
@ -84,7 +84,7 @@ func BenchmarkEcho(b *testing.B) {
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication())
s := server.NewSocketServer(sockPath, kvstore.NewApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := s.Start(); err != nil {
b.Fatalf("Error starting socket server: %v", err.Error())
@ -123,7 +123,7 @@ func TestInfo(t *testing.T) {
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication())
s := server.NewSocketServer(sockPath, kvstore.NewApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())


+ 3
- 3
proxy/client.go View File

@ -66,11 +66,11 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
switch addr {
case "counter":
return NewLocalClientCreator(counter.NewCounterApplication(false))
return NewLocalClientCreator(counter.NewApplication(false))
case "counter_serial":
return NewLocalClientCreator(counter.NewCounterApplication(true))
return NewLocalClientCreator(counter.NewApplication(true))
case "kvstore":
return NewLocalClientCreator(kvstore.NewKVStoreApplication())
return NewLocalClientCreator(kvstore.NewApplication())
case "persistent_kvstore":
return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
case "noop":


+ 2
- 2
rpc/client/examples_test.go View File

@ -12,7 +12,7 @@ import (
func ExampleHTTP_simple() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
defer rpctest.StopTendermint(node)
@ -62,7 +62,7 @@ func ExampleHTTP_simple() {
func ExampleHTTP_batching() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
defer rpctest.StopTendermint(node)


+ 1
- 1
rpc/client/mock/abci_test.go View File

@ -156,7 +156,7 @@ func TestABCIRecorder(t *testing.T) {
func TestABCIApp(t *testing.T) {
assert, require := assert.New(t), require.New(t)
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
m := mock.ABCIApp{app}
// get some info


+ 4
- 4
rpc/client/rpc_test.go View File

@ -500,11 +500,11 @@ func newEvidence(
chainID string,
) types.DuplicateVoteEvidence {
var err error
vote2_ := deepcpVote(vote2)
vote2_.Signature, err = val.Key.PrivKey.Sign(vote2_.SignBytes(chainID))
deepcpVote2 := deepcpVote(vote2)
deepcpVote2.Signature, err = val.Key.PrivKey.Sign(deepcpVote2.SignBytes(chainID))
require.NoError(t, err)
return *types.NewDuplicateVoteEvidence(val.Key.PubKey, vote, vote2_)
return *types.NewDuplicateVoteEvidence(val.Key.PubKey, vote, deepcpVote2)
}
func makeEvidences(
@ -586,7 +586,7 @@ func TestBroadcastEvidenceDuplicateVote(t *testing.T) {
for i, c := range GetClients() {
t.Logf("client %d", i)
result, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB})
result, err := c.BroadcastEvidence(&ev)
require.Nil(t, err)
require.Equal(t, ev.Hash(), result.Hash, "Invalid response, result %+v", result)


+ 2
- 2
rpc/core/pipe.go View File

@ -70,7 +70,7 @@ var (
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
txIndexer txindex.TxIndexer
consensusReactor *consensus.ConsensusReactor
consensusReactor *consensus.Reactor
eventBus *types.EventBus // thread safe
mempool mempl.Mempool
@ -123,7 +123,7 @@ func SetTxIndexer(indexer txindex.TxIndexer) {
txIndexer = indexer
}
func SetConsensusReactor(conR *consensus.ConsensusReactor) {
func SetConsensusReactor(conR *consensus.Reactor) {
consensusReactor = conR
}
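
These signatures show the golint stutter rule behind most renames in this commit: an exported name that repeats its package reads doubled at every call site. A compilable note on the before/after spellings (the real types live in package consensus):

package main

import "fmt"

//	var r *consensus.ConsensusReactor // flagged: name stutters with package
//	var r *consensus.Reactor          // lint-clean
func main() {
	fmt.Println("consensus.ConsensusReactor -> consensus.Reactor")
}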


+ 1
- 1
rpc/grpc/grpc_test.go View File

@ -14,7 +14,7 @@ import (
func TestMain(m *testing.M) {
// start a tendermint node in the background to test against
app := kvstore.NewKVStoreApplication()
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app)
code := m.Run()


+ 1
- 1
rpc/lib/client/integration_test.go View File

@ -47,7 +47,7 @@ func TestWSClientReconnectWithJitter(t *testing.T) {
for key, c := range clientMap {
if !c.IsActive() {
delete(clientMap, key)
stopCount += 1
stopCount++
}
}
require.Equal(t, stopCount, n, "expecting all clients to have been stopped")


+ 3
- 3
rpc/test/helpers.go View File

@ -44,10 +44,10 @@ func waitForRPC() {
_, err := client.Call("status", map[string]interface{}{}, result)
if err == nil {
return
} else {
fmt.Println("error", err)
time.Sleep(time.Millisecond)
}
fmt.Println("error", err)
time.Sleep(time.Millisecond)
}
}


+ 1
- 1
scripts/json2wal/main.go View File

@ -22,7 +22,7 @@ import (
var cdc = amino.NewCodec()
func init() {
cs.RegisterConsensusMessages(cdc)
cs.RegisterMessages(cdc)
cs.RegisterWALMessages(cdc)
types.RegisterBlockAmino(cdc)
}


+ 1
- 1
scripts/wal2json/main.go View File

@ -20,7 +20,7 @@ import (
var cdc = amino.NewCodec()
func init() {
cs.RegisterConsensusMessages(cdc)
cs.RegisterMessages(cdc)
cs.RegisterWALMessages(cdc)
types.RegisterBlockAmino(cdc)
}


+ 1
- 1
state/execution_test.go View File

@ -26,7 +26,7 @@ var (
)
func TestApplyBlock(t *testing.T) {
cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.Nil(t, err)


+ 0
- 6
state/export_test.go View File

@ -37,12 +37,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.V
return validateValidatorUpdates(abciUpdates, params)
}
// CalcValidatorsKey is an alias for the private calcValidatorsKey method in
// store.go, exported exclusively and explicitly for testing.
func CalcValidatorsKey(height int64) []byte {
return calcValidatorsKey(height)
}
// SaveConsensusParamsInfo is an alias for the private saveConsensusParamsInfo
// method in store.go, exported exclusively and explicitly for testing.
func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) {


+ 1
- 1
state/state_test.go View File

@ -362,7 +362,7 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) {
for i := 0; i < runs; i++ {
prop := valSet.GetProposer()
idx, _ := valSet.GetByAddress(prop.Address)
freqs[idx] += 1
freqs[idx]++
valSet.IncrementProposerPriority(1)
}


+ 20
- 20
state/txindex/kv/kv.go View File

@ -297,24 +297,24 @@ func (r queryRange) lowerBoundValue() interface{} {
if r.includeLowerBound {
return r.lowerBound
} else {
switch t := r.lowerBound.(type) {
case int64:
return t + 1
case time.Time:
return t.Unix() + 1
default:
panic("not implemented")
}
}
switch t := r.lowerBound.(type) {
case int64:
return t + 1
case time.Time:
return t.Unix() + 1
default:
panic("not implemented")
}
}
func (r queryRange) AnyBound() interface{} {
if r.lowerBound != nil {
return r.lowerBound
} else {
return r.upperBound
}
return r.upperBound
}
func (r queryRange) upperBoundValue() interface{} {
@ -324,15 +324,15 @@ func (r queryRange) upperBoundValue() interface{} {
if r.includeUpperBound {
return r.upperBound
} else {
switch t := r.upperBound.(type) {
case int64:
return t - 1
case time.Time:
return t.Unix() - 1
default:
panic("not implemented")
}
}
switch t := r.upperBound.(type) {
case int64:
return t - 1
case time.Time:
return t.Unix() - 1
default:
panic("not implemented")
}
}
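
The queryRange rewrites above, like waitForRPC earlier, drop an else whose sibling branch already returned; golint asks for the else block to be outdented. The shape in miniature, using the lower-bound case:

package main

import "fmt"

func lowerBoundValue(lower int64, inclusive bool) int64 {
	if inclusive {
		return lower
	}
	// no else needed: the inclusive path already returned
	return lower + 1
}

func main() {
	fmt.Println(lowerBoundValue(10, true))  // 10
	fmt.Println(lowerBoundValue(10, false)) // 11
}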


+ 3
- 3
test/app/grpc_client.go View File

@ -8,7 +8,7 @@ import (
"context"
amino "github.com/tendermint/go-amino"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
coregrpc "github.com/tendermint/tendermint/rpc/grpc"
)
var grpcAddr = "tcp://localhost:36656"
@ -26,8 +26,8 @@ func main() {
os.Exit(1)
}
clientGRPC := core_grpc.StartGRPCClient(grpcAddr)
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: txBytes})
clientGRPC := coregrpc.StartGRPCClient(grpcAddr)
res, err := clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes})
if err != nil {
fmt.Println(err)
os.Exit(1)


+ 2
- 1
tools/tm-monitor/monitor/node.go View File

@ -57,7 +57,8 @@ func NewNodeWithEventMeterAndRPCClient(
rpcAddr string,
em eventMeter,
rpcClient rpc_client.HTTPClient,
options ...func(*Node)) *Node {
options ...func(*Node),
) *Node {
n := &Node{
rpcAddr: rpcAddr,
em: em,


+ 13
- 13
types/evidence.go View File

@ -256,13 +256,13 @@ func NewMockRandomGoodEvidence(height int64, address []byte, randBytes []byte) M
}
func (e MockRandomGoodEvidence) Hash() []byte {
return []byte(fmt.Sprintf("%d-%x", e.Height_, e.randBytes))
return []byte(fmt.Sprintf("%d-%x", e.EvidenceHeight, e.randBytes))
}
// UNSTABLE
type MockGoodEvidence struct {
Height_ int64
Address_ []byte
EvidenceHeight int64
EvidenceAddress []byte
}
var _ Evidence = &MockGoodEvidence{}
@ -272,23 +272,23 @@ func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence
return MockGoodEvidence{height, address}
}
func (e MockGoodEvidence) Height() int64 { return e.Height_ }
func (e MockGoodEvidence) Address() []byte { return e.Address_ }
func (e MockGoodEvidence) Height() int64 { return e.EvidenceHeight }
func (e MockGoodEvidence) Address() []byte { return e.EvidenceAddress }
func (e MockGoodEvidence) Hash() []byte {
return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_))
return []byte(fmt.Sprintf("%d-%x", e.EvidenceHeight, e.EvidenceAddress))
}
func (e MockGoodEvidence) Bytes() []byte {
return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_))
return []byte(fmt.Sprintf("%d-%x", e.EvidenceHeight, e.EvidenceAddress))
}
func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil }
func (e MockGoodEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockGoodEvidence)
return e.Height_ == e2.Height_ &&
bytes.Equal(e.Address_, e2.Address_)
return e.EvidenceHeight == e2.EvidenceHeight &&
bytes.Equal(e.EvidenceAddress, e2.EvidenceAddress)
}
func (e MockGoodEvidence) ValidateBasic() error { return nil }
func (e MockGoodEvidence) String() string {
return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_)
return fmt.Sprintf("GoodEvidence: %d/%s", e.EvidenceHeight, e.EvidenceAddress)
}
// UNSTABLE
@ -301,12 +301,12 @@ func (e MockBadEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
}
func (e MockBadEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockBadEvidence)
return e.Height_ == e2.Height_ &&
bytes.Equal(e.Address_, e2.Address_)
return e.EvidenceHeight == e2.EvidenceHeight &&
bytes.Equal(e.EvidenceAddress, e2.EvidenceAddress)
}
func (e MockBadEvidence) ValidateBasic() error { return nil }
func (e MockBadEvidence) String() string {
return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_)
return fmt.Sprintf("BadEvidence: %d/%s", e.EvidenceHeight, e.EvidenceAddress)
}
//-------------------------------------------


+ 15
- 15
types/vote_set.go View File

@ -59,11 +59,11 @@ type P2PID string
NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64.
*/
type VoteSet struct {
chainID string
height int64
round int
type_ SignedMsgType
valSet *ValidatorSet
chainID string
height int64
round int
signedMsgType SignedMsgType
valSet *ValidatorSet
mtx sync.Mutex
votesBitArray *cmn.BitArray
@ -75,7 +75,7 @@ type VoteSet struct {
}
// Constructs a new VoteSet struct used to accumulate votes for given height/round.
func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet {
func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, valSet *ValidatorSet) *VoteSet {
if height == 0 {
panic("Cannot make VoteSet for height == 0, doesn't make sense.")
}
@ -83,7 +83,7 @@ func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, va
chainID: chainID,
height: height,
round: round,
type_: type_,
signedMsgType: signedMsgType,
valSet: valSet,
votesBitArray: cmn.NewBitArray(valSet.Size()),
votes: make([]*Vote, valSet.Size()),
@ -119,7 +119,7 @@ func (voteSet *VoteSet) Type() byte {
if voteSet == nil {
return 0x00
}
return byte(voteSet.type_)
return byte(voteSet.signedMsgType)
}
// Implements VoteSetReader.
@ -168,9 +168,9 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
// Make sure the step matches.
if (vote.Height != voteSet.height) ||
(vote.Round != voteSet.round) ||
(vote.Type != voteSet.type_) {
(vote.Type != voteSet.signedMsgType) {
return false, errors.Wrapf(ErrVoteUnexpectedStep, "Expected %d/%d/%d, but got %d/%d/%d",
voteSet.height, voteSet.round, voteSet.type_,
voteSet.height, voteSet.round, voteSet.signedMsgType,
vote.Height, vote.Round, vote.Type)
}
@ -400,7 +400,7 @@ func (voteSet *VoteSet) IsCommit() bool {
if voteSet == nil {
return false
}
if voteSet.type_ != PrecommitType {
if voteSet.signedMsgType != PrecommitType {
return false
}
voteSet.mtx.Lock()
@ -464,7 +464,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string {
%s %v
%s %v
%s}`,
indent, voteSet.height, voteSet.round, voteSet.type_,
indent, voteSet.height, voteSet.round, voteSet.signedMsgType,
indent, strings.Join(voteStrings, "\n"+indent+" "),
indent, voteSet.votesBitArray,
indent, voteSet.peerMaj23s,
@ -472,7 +472,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string {
}
// Marshal the VoteSet to JSON. Same as String(), just in JSON,
// and without the height/round/type_ (since its already included in the votes).
// and without the height/round/signedMsgType (since it's already included in the votes).
func (voteSet *VoteSet) MarshalJSON() ([]byte, error) {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
@ -534,7 +534,7 @@ func (voteSet *VoteSet) StringShort() string {
defer voteSet.mtx.Unlock()
_, _, frac := voteSet.sumTotalFrac()
return fmt.Sprintf(`VoteSet{H:%v R:%v T:%v +2/3:%v(%v) %v %v}`,
voteSet.height, voteSet.round, voteSet.type_, voteSet.maj23, frac, voteSet.votesBitArray, voteSet.peerMaj23s)
voteSet.height, voteSet.round, voteSet.signedMsgType, voteSet.maj23, frac, voteSet.votesBitArray, voteSet.peerMaj23s)
}
// return the power voted, the total, and the fraction
@ -551,7 +551,7 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) {
// Panics if the vote type is not PrecommitType or if
// there's no +2/3 votes for a single block.
func (voteSet *VoteSet) MakeCommit() *Commit {
if voteSet.type_ != PrecommitType {
if voteSet.signedMsgType != PrecommitType {
panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType")
}
voteSet.mtx.Lock()


+ 4
- 4
types/vote_set_test.go View File

@ -15,12 +15,12 @@ import (
func randVoteSet(
height int64,
round int,
type_ SignedMsgType,
signedMsgType SignedMsgType,
numValidators int,
votingPower int64,
) (*VoteSet, *ValidatorSet, []PrivValidator) {
valSet, privValidators := RandValidatorSet(numValidators, votingPower)
return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators
return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators
}
// Convenience: Return new vote with different validator address/index
@ -46,9 +46,9 @@ func withRound(vote *Vote, round int) *Vote {
}
// Convenience: Return new vote with different type
func withType(vote *Vote, type_ byte) *Vote {
func withType(vote *Vote, signedMsgType byte) *Vote {
vote = vote.Copy()
vote.Type = SignedMsgType(type_)
vote.Type = SignedMsgType(signedMsgType)
return vote
}
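
Behind type_ -> signedMsgType here, and Height_ -> EvidenceHeight in types/evidence.go above, is golint's rule against underscores in Go names: the trailing-underscore workaround for keyword collisions gives way to a descriptive name. A compilable illustration:

package main

import "fmt"

type SignedMsgType byte

type voteSetSketch struct {
	signedMsgType SignedMsgType // was: type_ (flagged by golint)
}

func main() {
	vs := voteSetSketch{signedMsgType: 0x02}
	fmt.Println(vs.signedMsgType) // 2
}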


+ 1
- 1
version/version.go View File

@ -5,7 +5,7 @@ var (
GitCommit string
// Version is the built software's version.
Version string = TMCoreSemVer
Version = TMCoreSemVer
)
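
The version.go change leans on type inference: golint asks to omit a type the initializer already determines. TMCoreSemVer's value below is a stand-in, not the real version string:

package main

import "fmt"

const TMCoreSemVer = "x.y.z" // stand-in value

var Version = TMCoreSemVer // was: var Version string = TMCoreSemVer

func main() {
	fmt.Println(Version)
}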
func init() {

