
cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing.
This is made necessary by our extensive reuse of common base package names, and
by repetition of similar names across different subdirectories.

Unfortunately we have not been very consistent about which packages we alias in
various circumstances, and the aliases we use vary. In the spirit of the advice
in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports,
this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended
to help make the code more readable to developers (including myself) trying to
understand what is being imported where.

Only unexported names have been modified, and the changes were generated and
applied mechanically with gofmt -r and comby, respecting the lexical and
syntactic rules of Go. Even so, I did not fix every inconsistency. Where the
changes would be too disruptive, I left them alone.
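
As a rough illustration of the mechanical rewrites involved: a gofmt -r rule can rename qualified identifiers at usage sites once an alias changes, while the import declarations themselves have to be edited with a structural tool such as comby or by hand. The rule and the tiny file below are a hypothetical sketch, not the actual commands used for this change.

// Hypothetical sketch of a usage-site rename of the kind applied here. A
// rewrite rule such as
//
//	gofmt -r 'cryptoenc.PubKeyFromProto -> encoding.PubKeyFromProto' -w .
//
// renames call sites; it cannot touch the import lines themselves, which are
// declarations rather than expressions.
package example

import (
	"github.com/tendermint/tendermint/crypto/encoding"
	cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)

// decodeKey shows a call site after the rename: the bare package name
// "encoding" replaces the old "cryptoenc" alias.
func decodeKey(pb cryptoproto.PublicKey) error {
	_, err := encoding.PubKeyFromProto(pb)
	return err
}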

The principles I followed in this cleanup are listed below; a short illustrative sketch follows the list:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
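
Pulling these principles together, the sketch below shows a hypothetical import block before and after the cleanup; the grouping and the small function are illustrative assumptions, not a file copied from this change.

// Before (sketch): aliases that restate the package name ("cfg"), overly
// terse abbreviations ("pc"), and mixed-case aliases ("tmState") obscure
// which package a call site refers to:
//
//	cfg "github.com/tendermint/tendermint/config"
//	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
//	tmState "github.com/tendermint/tendermint/internal/state"
//	pc "github.com/tendermint/tendermint/proto/tendermint/crypto"

// After (sketch): imports are grouped, unambiguous packages keep their bare
// base name, remaining aliases are lowercase and descriptive, and blank
// (side-effecting) imports carry a comment.
package example

import (
	"fmt"

	_ "net/http/pprof" // registers pprof handlers on http.DefaultServeMux

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding"
)

// show exercises the imports so the sketch stays a compilable fragment.
func show() {
	conf := config.DefaultConfig()
	pb, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey())
	fmt.Println(conf.Moniker, pb, err)
}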
M. J. Fromberger authored 3 years ago · committed by GitHub · commit cf7537ea5f
127 changed files with 1473 additions and 1473 deletions
1. abci/example/kvstore/persistent_kvstore.go (+7 -7)
2. abci/types/pubkey.go (+4 -4)
3. cmd/tendermint/commands/debug/dump.go (+4 -4)
4. cmd/tendermint/commands/debug/kill.go (+3 -3)
5. cmd/tendermint/commands/debug/util.go (+2 -2)
6. cmd/tendermint/commands/light.go (+0 -1)
7. cmd/tendermint/commands/reindex_event.go (+10 -9)
8. cmd/tendermint/main.go (+4 -4)
9. config/db.go (+6 -5)
10. crypto/encoding/codec.go (+16 -16)
11. crypto/secp256k1/secp256k1_test.go (+1 -2)
12. crypto/xchacha20poly1305/xchachapoly_test.go (+11 -11)
13. internal/blocksync/v0/reactor.go (+8 -8)
14. internal/blocksync/v0/reactor_test.go (+17 -17)
15. internal/blocksync/v2/processor.go (+5 -5)
16. internal/blocksync/v2/processor_context.go (+3 -3)
17. internal/blocksync/v2/processor_test.go (+5 -5)
18. internal/blocksync/v2/reactor.go (+6 -6)
19. internal/blocksync/v2/reactor_test.go (+9 -9)
20. internal/consensus/byzantine_test.go (+2 -1)
21. internal/consensus/common_test.go (+35 -37)
22. internal/consensus/mempool_test.go (+5 -6)
23. internal/consensus/reactor_test.go (+22 -22)
24. internal/consensus/replay_file.go (+9 -9)
25. internal/consensus/replay_stubs.go (+5 -5)
26. internal/consensus/replay_test.go (+58 -58)
27. internal/consensus/state.go (+4 -4)
28. internal/consensus/types/height_vote_set_test.go (+6 -6)
29. internal/consensus/wal_generator.go (+11 -11)
30. internal/evidence/verify_test.go (+0 -1)
31. internal/mempool/mock/mempool.go (+5 -5)
32. internal/mempool/v0/clist_mempool.go (+6 -6)
33. internal/mempool/v0/clist_mempool_test.go (+12 -12)
34. internal/mempool/v0/reactor.go (+8 -8)
35. internal/mempool/v0/reactor_test.go (+17 -17)
36. internal/mempool/v1/reactor.go (+8 -8)
37. internal/p2p/conn/evil_secret_connection_test.go (+2 -2)
38. internal/p2p/conn/secret_connection.go (+3 -3)
39. internal/p2p/peermanager_scoring_test.go (+2 -1)
40. internal/p2p/pex/reactor_test.go (+35 -35)
41. internal/rpc/core/abci.go (+5 -5)
42. internal/rpc/core/blocks.go (+21 -21)
43. internal/rpc/core/blocks_test.go (+3 -3)
44. internal/rpc/core/consensus.go (+16 -16)
45. internal/rpc/core/dev.go (+3 -3)
46. internal/rpc/core/env.go (+10 -10)
47. internal/rpc/core/events.go (+8 -8)
48. internal/rpc/core/evidence.go (+4 -4)
49. internal/rpc/core/health.go (+3 -3)
50. internal/rpc/core/mempool.go (+20 -20)
51. internal/rpc/core/net.go (+23 -23)
52. internal/rpc/core/net_test.go (+3 -3)
53. internal/rpc/core/status.go (+6 -6)
54. internal/rpc/core/tx.go (+8 -8)
55. internal/state/execution.go (+6 -6)
56. internal/state/execution_test.go (+8 -8)
57. internal/state/helpers_test.go (+4 -4)
58. internal/state/indexer/block/kv/kv_test.go (+3 -2)
59. internal/state/indexer/indexer_service_test.go (+5 -3)
60. internal/state/indexer/sink/kv/kv.go (+2 -1)
61. internal/state/indexer/sink/kv/kv_test.go (+9 -8)
62. internal/state/indexer/tx/kv/kv_test.go (+8 -9)
63. internal/state/state_test.go (+25 -26)
64. internal/state/store_test.go (+6 -7)
65. internal/state/tx_filter.go (+5 -5)
66. internal/state/validation_test.go (+10 -10)
67. internal/statesync/dispatcher.go (+2 -2)
68. internal/test/factory/genesis.go (+3 -3)
69. light/client_test.go (+0 -1)
70. light/provider/http/http.go (+3 -3)
71. light/proxy/routes.go (+51 -51)
72. light/rpc/client.go (+44 -44)
73. light/store/db/db_test.go (+0 -1)
74. node/node.go (+120 -118)
75. node/node_test.go (+75 -76)
76. node/setup.go (+109 -108)
77. privval/grpc/client.go (+2 -2)
78. privval/grpc/server.go (+2 -2)
79. privval/grpc/util.go (+6 -6)
80. privval/msgs_test.go (+2 -2)
81. privval/secret_connection.go (+3 -3)
82. privval/signer_client.go (+2 -2)
83. privval/signer_requestHandler.go (+2 -2)
84. proto/tendermint/blocksync/message.go (+2 -2)
85. proto/tendermint/blocksync/message_test.go (+1 -1)
86. proto/tendermint/consensus/message.go (+2 -2)
87. proto/tendermint/mempool/message.go (+2 -2)
88. proto/tendermint/p2p/pex.go (+2 -2)
89. proto/tendermint/statesync/message.go (+2 -2)
90. proto/tendermint/statesync/message_test.go (+1 -1)
91. rpc/client/event_test.go (+2 -2)
92. rpc/client/evidence_test.go (+2 -2)
93. rpc/client/examples_test.go (+2 -2)
94. rpc/client/helpers_test.go (+5 -5)
95. rpc/client/http/http.go (+54 -54)
96. rpc/client/http/ws.go (+7 -7)
97. rpc/client/interface.go (+29 -29)
98. rpc/client/local/local.go (+49 -49)
99. rpc/client/mock/abci.go (+29 -29)
100. rpc/client/mock/abci_test.go (+3 -3)

abci/example/kvstore/persistent_kvstore.go (+7 -7)

@@ -11,9 +11,9 @@ import (
 "github.com/tendermint/tendermint/abci/example/code"
 "github.com/tendermint/tendermint/abci/types"
-cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+"github.com/tendermint/tendermint/crypto/encoding"
 "github.com/tendermint/tendermint/libs/log"
-pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
+cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
 )
 const (
@@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct {
 // validator set
 ValUpdates []types.ValidatorUpdate
-valAddrToPubKeyMap map[string]pc.PublicKey
+valAddrToPubKeyMap map[string]cryptoproto.PublicKey
 logger log.Logger
 }
@@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
 return &PersistentKVStoreApplication{
 app: &Application{state: state},
-valAddrToPubKeyMap: make(map[string]pc.PublicKey),
+valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
 logger: log.NewNopLogger(),
 }
 }
@@ -194,8 +194,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
 return
 }
-func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
-pk, err := cryptoenc.PubKeyFromProto(pubkey)
+func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
+pk, err := encoding.PubKeyFromProto(pubkey)
 if err != nil {
 panic(err)
 }
@@ -243,7 +243,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
 // add, update, or remove a validator
 func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
-pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
+pubkey, err := encoding.PubKeyFromProto(v.PubKey)
 if err != nil {
 panic(fmt.Errorf("can't decode public key: %w", err))
 }


abci/types/pubkey.go (+4 -4)

@@ -4,7 +4,7 @@ import (
 fmt "fmt"
 "github.com/tendermint/tendermint/crypto/ed25519"
-cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+"github.com/tendermint/tendermint/crypto/encoding"
 "github.com/tendermint/tendermint/crypto/secp256k1"
 "github.com/tendermint/tendermint/crypto/sr25519"
 )
@@ -12,7 +12,7 @@ import (
 func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
 pke := ed25519.PubKey(pk)
-pkp, err := cryptoenc.PubKeyToProto(pke)
+pkp, err := encoding.PubKeyToProto(pke)
 if err != nil {
 panic(err)
 }
@@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
 return Ed25519ValidatorUpdate(pk, power)
 case secp256k1.KeyType:
 pke := secp256k1.PubKey(pk)
-pkp, err := cryptoenc.PubKeyToProto(pke)
+pkp, err := encoding.PubKeyToProto(pke)
 if err != nil {
 panic(err)
 }
@@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
 }
 case sr25519.KeyType:
 pke := sr25519.PubKey(pk)
-pkp, err := cryptoenc.PubKeyToProto(pke)
+pkp, err := encoding.PubKeyToProto(pke)
 if err != nil {
 panic(err)
 }


cmd/tendermint/commands/debug/dump.go (+4 -4)

@@ -11,7 +11,7 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/config"
 "github.com/tendermint/tendermint/libs/cli"
 rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 )
@@ -65,9 +65,9 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
 }
 home := viper.GetString(cli.HomeFlag)
-conf := cfg.DefaultConfig()
+conf := config.DefaultConfig()
 conf = conf.SetRoot(home)
-cfg.EnsureRoot(conf.RootDir)
+config.EnsureRoot(conf.RootDir)
 dumpDebugData(outDir, conf, rpc)
@@ -79,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
 return nil
 }
-func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
+func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
 start := time.Now().UTC()
 tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")


cmd/tendermint/commands/debug/kill.go (+3 -3)

@@ -14,7 +14,7 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/config"
 "github.com/tendermint/tendermint/libs/cli"
 rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 )
@@ -50,9 +50,9 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
 }
 home := viper.GetString(cli.HomeFlag)
-conf := cfg.DefaultConfig()
+conf := config.DefaultConfig()
 conf = conf.SetRoot(home)
-cfg.EnsureRoot(conf.RootDir)
+config.EnsureRoot(conf.RootDir)
 // Create a temporary directory which will contain all the state dumps and
 // relevant files and directories that will be compressed into a file.


cmd/tendermint/commands/debug/util.go (+2 -2)

@@ -9,7 +9,7 @@ import (
 "path"
 "path/filepath"
-cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/config"
 rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 )
@@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error {
 // copyWAL copies the Tendermint node's WAL file. It returns an error if the
 // WAL file cannot be read or copied.
-func copyWAL(conf *cfg.Config, dir string) error {
+func copyWAL(conf *config.Config, dir string) error {
 walPath := conf.Consensus.WalFile()
 walFile := filepath.Base(walPath)


cmd/tendermint/commands/light.go (+0 -1)

@@ -11,7 +11,6 @@ import (
 "time"
 "github.com/spf13/cobra"
-
 dbm "github.com/tendermint/tm-db"
 "github.com/tendermint/tendermint/libs/log"


cmd/tendermint/commands/reindex_event.go (+10 -9)

@@ -6,7 +6,7 @@ import (
 "strings"
 "github.com/spf13/cobra"
-tmdb "github.com/tendermint/tm-db"
+dbm "github.com/tendermint/tm-db"
 abcitypes "github.com/tendermint/tendermint/abci/types"
 tmcfg "github.com/tendermint/tendermint/config"
@@ -15,7 +15,7 @@ import (
 "github.com/tendermint/tendermint/internal/state/indexer"
 "github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
 "github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 "github.com/tendermint/tendermint/store"
 "github.com/tendermint/tendermint/types"
 )
@@ -129,17 +129,17 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
 }
 func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
-dbType := tmdb.BackendType(cfg.DBBackend)
+dbType := dbm.BackendType(cfg.DBBackend)
 // Get BlockStore
-blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
+blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
 if err != nil {
 return nil, nil, err
 }
 blockStore := store.NewBlockStore(blockStoreDB)
 // Get StateStore
-stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
+stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
 if err != nil {
 return nil, nil, err
 }
@@ -221,14 +221,15 @@ func checkValidHeight(bs state.BlockStore) error {
 }
 if startHeight < base {
-return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
+return fmt.Errorf("%s (requested start height: %d, base height: %d)",
+coretypes.ErrHeightNotAvailable, startHeight, base)
 }
 height := bs.Height()
 if startHeight > height {
 return fmt.Errorf(
-"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
+"%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height)
 }
 if endHeight == 0 || endHeight > height {
@@ -238,13 +239,13 @@ func checkValidHeight(bs state.BlockStore) error {
 if endHeight < base {
 return fmt.Errorf(
-"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
+"%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base)
 }
 if endHeight < startHeight {
 return fmt.Errorf(
 "%s (requested the end height: %d is less than the start height: %d)",
-ctypes.ErrInvalidRequest, startHeight, endHeight)
+coretypes.ErrInvalidRequest, startHeight, endHeight)
 }
 return nil


cmd/tendermint/main.go (+4 -4)

@@ -6,9 +6,9 @@ import (
 cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
 "github.com/tendermint/tendermint/cmd/tendermint/commands/debug"
-cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/config"
 "github.com/tendermint/tendermint/libs/cli"
-nm "github.com/tendermint/tendermint/node"
+"github.com/tendermint/tendermint/node"
 )
 func main() {
@@ -42,12 +42,12 @@ func main() {
 // * Provide their own DB implementation
 // can copy this file and use something other than the
 // node.NewDefault function
-nodeFunc := nm.NewDefault
+nodeFunc := node.NewDefault
 // Create & start node
 rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
-cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir)))
+cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir)))
 if err := cmd.Execute(); err != nil {
 panic(err)
 }


config/db.go (+6 -5)

@@ -1,9 +1,10 @@
 package config
 import (
+dbm "github.com/tendermint/tm-db"
 "github.com/tendermint/tendermint/libs/log"
 "github.com/tendermint/tendermint/libs/service"
-db "github.com/tendermint/tm-db"
 )
 // ServiceProvider takes a config and a logger and returns a ready to go Node.
@@ -16,11 +17,11 @@ type DBContext struct {
 }
 // DBProvider takes a DBContext and returns an instantiated DB.
-type DBProvider func(*DBContext) (db.DB, error)
+type DBProvider func(*DBContext) (dbm.DB, error)
 // DefaultDBProvider returns a database using the DBBackend and DBDir
 // specified in the Config.
-func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
-dbType := db.BackendType(ctx.Config.DBBackend)
-return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
+func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
+dbType := dbm.BackendType(ctx.Config.DBBackend)
+return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
 }

crypto/encoding/codec.go (+16 -16)

@@ -8,34 +8,34 @@ import (
 "github.com/tendermint/tendermint/crypto/secp256k1"
 "github.com/tendermint/tendermint/crypto/sr25519"
 "github.com/tendermint/tendermint/libs/json"
-pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
+cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
 )
 func init() {
-json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey")
-json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519")
-json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1")
+json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey")
+json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519")
+json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1")
 }
 // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey
-func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) {
-var kp pc.PublicKey
+func PubKeyToProto(k crypto.PubKey) (cryptoproto.PublicKey, error) {
+var kp cryptoproto.PublicKey
 switch k := k.(type) {
 case ed25519.PubKey:
-kp = pc.PublicKey{
-Sum: &pc.PublicKey_Ed25519{
+kp = cryptoproto.PublicKey{
+Sum: &cryptoproto.PublicKey_Ed25519{
 Ed25519: k,
 },
 }
 case secp256k1.PubKey:
-kp = pc.PublicKey{
-Sum: &pc.PublicKey_Secp256K1{
+kp = cryptoproto.PublicKey{
+Sum: &cryptoproto.PublicKey_Secp256K1{
 Secp256K1: k,
 },
 }
 case sr25519.PubKey:
-kp = pc.PublicKey{
-Sum: &pc.PublicKey_Sr25519{
+kp = cryptoproto.PublicKey{
+Sum: &cryptoproto.PublicKey_Sr25519{
 Sr25519: k,
 },
 }
@@ -46,9 +46,9 @@ func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) {
 }
 // PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey
-func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
+func PubKeyFromProto(k cryptoproto.PublicKey) (crypto.PubKey, error) {
 switch k := k.Sum.(type) {
-case *pc.PublicKey_Ed25519:
+case *cryptoproto.PublicKey_Ed25519:
 if len(k.Ed25519) != ed25519.PubKeySize {
 return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d",
 len(k.Ed25519), ed25519.PubKeySize)
@@ -56,7 +56,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
 pk := make(ed25519.PubKey, ed25519.PubKeySize)
 copy(pk, k.Ed25519)
 return pk, nil
-case *pc.PublicKey_Secp256K1:
+case *cryptoproto.PublicKey_Secp256K1:
 if len(k.Secp256K1) != secp256k1.PubKeySize {
 return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d",
 len(k.Secp256K1), secp256k1.PubKeySize)
@@ -64,7 +64,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
 pk := make(secp256k1.PubKey, secp256k1.PubKeySize)
 copy(pk, k.Secp256K1)
 return pk, nil
-case *pc.PublicKey_Sr25519:
+case *cryptoproto.PublicKey_Sr25519:
 if len(k.Sr25519) != sr25519.PubKeySize {
 return nil, fmt.Errorf("invalid size for PubKeySr25519. Got %d, expected %d",
 len(k.Sr25519), sr25519.PubKeySize)


crypto/secp256k1/secp256k1_test.go (+1 -2)

@@ -5,14 +5,13 @@ import (
 "math/big"
 "testing"
+underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
 "github.com/btcsuite/btcutil/base58"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "github.com/tendermint/tendermint/crypto"
 "github.com/tendermint/tendermint/crypto/secp256k1"
-underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
 )
 type keyData struct {


crypto/xchacha20poly1305/xchachapoly_test.go (+11 -11)

@@ -2,8 +2,8 @@ package xchacha20poly1305
 import (
 "bytes"
-cr "crypto/rand"
-mr "math/rand"
+crand "crypto/rand"
+mrand "math/rand"
 "testing"
 )
@@ -19,23 +19,23 @@ func TestRandom(t *testing.T) {
 var nonce [24]byte
 var key [32]byte
-al := mr.Intn(128)
-pl := mr.Intn(16384)
+al := mrand.Intn(128)
+pl := mrand.Intn(16384)
 ad := make([]byte, al)
 plaintext := make([]byte, pl)
-_, err := cr.Read(key[:])
+_, err := crand.Read(key[:])
 if err != nil {
 t.Errorf("error on read: %w", err)
 }
-_, err = cr.Read(nonce[:])
+_, err = crand.Read(nonce[:])
 if err != nil {
 t.Errorf("error on read: %w", err)
 }
-_, err = cr.Read(ad)
+_, err = crand.Read(ad)
 if err != nil {
 t.Errorf("error on read: %w", err)
 }
-_, err = cr.Read(plaintext)
+_, err = crand.Read(plaintext)
 if err != nil {
 t.Errorf("error on read: %w", err)
 }
@@ -59,7 +59,7 @@ func TestRandom(t *testing.T) {
 }
 if len(ad) > 0 {
-alterAdIdx := mr.Intn(len(ad))
+alterAdIdx := mrand.Intn(len(ad))
 ad[alterAdIdx] ^= 0x80
 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
 t.Errorf("random #%d: Open was successful after altering additional data", i)
@@ -67,14 +67,14 @@ func TestRandom(t *testing.T) {
 ad[alterAdIdx] ^= 0x80
 }
-alterNonceIdx := mr.Intn(aead.NonceSize())
+alterNonceIdx := mrand.Intn(aead.NonceSize())
 nonce[alterNonceIdx] ^= 0x80
 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
 t.Errorf("random #%d: Open was successful after altering nonce", i)
 }
 nonce[alterNonceIdx] ^= 0x80
-alterCtIdx := mr.Intn(len(ct))
+alterCtIdx := mrand.Intn(len(ct))
 ct[alterCtIdx] ^= 0x80
 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
 t.Errorf("random #%d: Open was successful after altering ciphertext", i)


internal/blocksync/v0/reactor.go (+8 -8)

@@ -6,13 +6,13 @@ import (
 "sync"
 "time"
-bc "github.com/tendermint/tendermint/internal/blocksync"
-cons "github.com/tendermint/tendermint/internal/consensus"
+"github.com/tendermint/tendermint/internal/blocksync"
+"github.com/tendermint/tendermint/internal/consensus"
 "github.com/tendermint/tendermint/internal/p2p"
 sm "github.com/tendermint/tendermint/internal/state"
 "github.com/tendermint/tendermint/libs/log"
 "github.com/tendermint/tendermint/libs/service"
-tmSync "github.com/tendermint/tendermint/libs/sync"
+tmsync "github.com/tendermint/tendermint/libs/sync"
 bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
 "github.com/tendermint/tendermint/store"
 "github.com/tendermint/tendermint/types"
@@ -36,7 +36,7 @@ var (
 Priority: 5,
 SendQueueCapacity: 1000,
 RecvBufferCapacity: 1024,
-RecvMessageCapacity: bc.MaxMsgSize,
+RecvMessageCapacity: blocksync.MaxMsgSize,
 MaxSendBytes: 100,
 },
 },
@@ -85,7 +85,7 @@ type Reactor struct {
 store *store.BlockStore
 pool *BlockPool
 consReactor consensusReactor
-blockSync *tmSync.AtomicBool
+blockSync *tmsync.AtomicBool
 blockSyncCh *p2p.Channel
 // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
@@ -107,7 +107,7 @@ type Reactor struct {
 // stopping the p2p Channel(s).
 poolWG sync.WaitGroup
-metrics *cons.Metrics
+metrics *consensus.Metrics
 syncStartTime time.Time
 }
@@ -122,7 +122,7 @@ func NewReactor(
 blockSyncCh *p2p.Channel,
 peerUpdates *p2p.PeerUpdates,
 blockSync bool,
-metrics *cons.Metrics,
+metrics *consensus.Metrics,
 ) (*Reactor, error) {
 if state.LastBlockHeight != store.Height() {
 return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -142,7 +142,7 @@ func NewReactor(
 store: store,
 pool: NewBlockPool(startHeight, requestsCh, errorsCh),
 consReactor: consReactor,
-blockSync: tmSync.NewBool(blockSync),
+blockSync: tmsync.NewBool(blockSync),
 requestsCh: requestsCh,
 errorsCh: errorsCh,
 blockSyncCh: blockSyncCh,


internal/blocksync/v0/reactor_test.go (+17 -17)

@@ -6,11 +6,12 @@ import (
 "time"
 "github.com/stretchr/testify/require"
+dbm "github.com/tendermint/tm-db"
 abciclient "github.com/tendermint/tendermint/abci/client"
 abci "github.com/tendermint/tendermint/abci/types"
-cfg "github.com/tendermint/tendermint/config"
-cons "github.com/tendermint/tendermint/internal/consensus"
+"github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/internal/consensus"
 "github.com/tendermint/tendermint/internal/mempool/mock"
 "github.com/tendermint/tendermint/internal/p2p"
 "github.com/tendermint/tendermint/internal/p2p/p2ptest"
@@ -22,7 +23,6 @@ import (
 bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
 "github.com/tendermint/tendermint/store"
 "github.com/tendermint/tendermint/types"
-dbm "github.com/tendermint/tm-db"
 )
 type reactorTestSuite struct {
@@ -165,7 +165,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
 rts.blockSyncChannels[nodeID],
 rts.peerUpdates[nodeID],
 rts.blockSync,
-cons.NopMetrics())
+consensus.NopMetrics())
 require.NoError(t, err)
 require.NoError(t, rts.reactors[nodeID].Start())
@@ -182,10 +182,10 @@ func (rts *reactorTestSuite) start(t *testing.T) {
 }
 func TestReactor_AbruptDisconnect(t *testing.T) {
-config := cfg.ResetTestRoot("block_sync_reactor_test")
-defer os.RemoveAll(config.RootDir)
+cfg := config.ResetTestRoot("block_sync_reactor_test")
+defer os.RemoveAll(cfg.RootDir)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 maxBlockHeight := int64(64)
 rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -217,10 +217,10 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
 }
 func TestReactor_SyncTime(t *testing.T) {
-config := cfg.ResetTestRoot("block_sync_reactor_test")
-defer os.RemoveAll(config.RootDir)
+cfg := config.ResetTestRoot("block_sync_reactor_test")
+defer os.RemoveAll(cfg.RootDir)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 maxBlockHeight := int64(101)
 rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -240,10 +240,10 @@
 }
 func TestReactor_NoBlockResponse(t *testing.T) {
-config := cfg.ResetTestRoot("block_sync_reactor_test")
-defer os.RemoveAll(config.RootDir)
+cfg := config.ResetTestRoot("block_sync_reactor_test")
+defer os.RemoveAll(cfg.RootDir)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 maxBlockHeight := int64(65)
 rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -287,11 +287,11 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
 // See: https://github.com/tendermint/tendermint/issues/6005
 t.SkipNow()
-config := cfg.ResetTestRoot("block_sync_reactor_test")
-defer os.RemoveAll(config.RootDir)
+cfg := config.ResetTestRoot("block_sync_reactor_test")
+defer os.RemoveAll(cfg.RootDir)
 maxBlockHeight := int64(48)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
@@ -325,7 +325,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
 //
 // XXX: This causes a potential race condition.
 // See: https://github.com/tendermint/tendermint/issues/6005
-otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30)
+otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{
 MaxPeers: uint16(len(rts.nodes) + 1),
 MaxConnected: uint16(len(rts.nodes) + 1),


internal/blocksync/v2/processor.go (+5 -5)

@@ -3,7 +3,7 @@ package v2
 import (
 "fmt"
-tmState "github.com/tendermint/tendermint/internal/state"
+tmstate "github.com/tendermint/tendermint/internal/state"
 "github.com/tendermint/tendermint/types"
 )
@@ -36,7 +36,7 @@ func (e pcBlockProcessed) String() string {
 type pcFinished struct {
 priorityNormal
 blocksSynced int
-tmState tmState.State
+tmState tmstate.State
 }
 func (p pcFinished) Error() string {
@@ -148,11 +148,11 @@ func (state *pcState) handle(event Event) (Event, error) {
 return noOp, nil
 case rProcessBlock:
-tmState := state.context.tmState()
+tmstate := state.context.tmState()
 firstItem, secondItem, err := state.nextTwo()
 if err != nil {
 if state.draining {
-return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil
+return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil
 }
 return noOp, nil
 }
@@ -164,7 +164,7 @@ func (state *pcState) handle(event Event) (Event, error) {
 )
 // verify if +second+ last commit "confirms" +first+ block
-err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
+err = state.context.verifyCommit(tmstate.ChainID, firstID, first.Height, second.LastCommit)
 if err != nil {
 state.purgePeer(firstItem.peerID)
 if firstItem.peerID != secondItem.peerID {


internal/blocksync/v2/processor_context.go (+3 -3)

@@ -3,7 +3,7 @@ package v2
 import (
 "fmt"
-cons "github.com/tendermint/tendermint/internal/consensus"
+"github.com/tendermint/tendermint/internal/consensus"
 "github.com/tendermint/tendermint/internal/state"
 "github.com/tendermint/tendermint/types"
 )
@@ -21,10 +21,10 @@ type pContext struct {
 store blockStore
 applier blockApplier
 state state.State
-metrics *cons.Metrics
+metrics *consensus.Metrics
 }
-func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
+func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *consensus.Metrics) *pContext {
 return &pContext{
 store: st,
 applier: ex,


internal/blocksync/v2/processor_test.go (+5 -5)

@@ -5,7 +5,7 @@ import (
 "github.com/stretchr/testify/assert"
-tmState "github.com/tendermint/tendermint/internal/state"
+tmstate "github.com/tendermint/tendermint/internal/state"
 "github.com/tendermint/tendermint/types"
 )
@@ -33,7 +33,7 @@ func makePcBlock(height int64) *types.Block {
 // makeState takes test parameters and creates a specific processor state.
 func makeState(p *params) *pcState {
 var (
-tmState = tmState.State{LastBlockHeight: p.height}
+tmState = tmstate.State{LastBlockHeight: p.height}
 context = newMockProcessorContext(tmState, p.verBL, p.appBL)
 )
 state := newPcState(context)
@@ -207,7 +207,7 @@ func TestRProcessBlockSuccess(t *testing.T) {
 { // finish when H+1 or/and H+2 are missing
 event: rProcessBlock{},
 wantState: &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
-wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1},
+wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 1}, blocksSynced: 1},
 },
 },
 },
@@ -271,7 +271,7 @@ func TestScFinishedEv(t *testing.T) {
 {
 currentState: &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{},
 wantState: &params{height: 100, items: []pcBlock{}, blocksSynced: 100},
-wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
+wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100},
 },
 },
 },
@@ -282,7 +282,7 @@ func TestScFinishedEv(t *testing.T) {
 currentState: &params{height: 100, items: []pcBlock{
 {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{},
 wantState: &params{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
-wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
+wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100},
 },
 },
 },


internal/blocksync/v2/reactor.go (+6 -6)

@@ -5,11 +5,11 @@ import (
 "fmt"
 "time"
-proto "github.com/gogo/protobuf/proto"
+"github.com/gogo/protobuf/proto"
-bc "github.com/tendermint/tendermint/internal/blocksync"
+"github.com/tendermint/tendermint/internal/blocksync"
 "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
-cons "github.com/tendermint/tendermint/internal/consensus"
+"github.com/tendermint/tendermint/internal/consensus"
 tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 "github.com/tendermint/tendermint/internal/p2p"
 "github.com/tendermint/tendermint/internal/state"
@@ -61,7 +61,7 @@ type blockApplier interface {
 // XXX: unify naming in this package around tmState
 func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
-blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
+blockApplier blockApplier, blockSync bool, metrics *consensus.Metrics) *BlockchainReactor {
 initHeight := state.LastBlockHeight + 1
 if initHeight == 1 {
 initHeight = state.InitialHeight
@@ -91,7 +91,7 @@ func NewBlockchainReactor(
 blockApplier blockApplier,
 store blockStore,
 blockSync bool,
-metrics *cons.Metrics) *BlockchainReactor {
+metrics *consensus.Metrics) *BlockchainReactor {
 reporter := behavior.NewMockReporter()
 return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
 }
@@ -605,7 +605,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 Priority: 5,
 SendQueueCapacity: 2000,
 RecvBufferCapacity: 1024,
-RecvMessageCapacity: bc.MaxMsgSize,
+RecvMessageCapacity: blocksync.MaxMsgSize,
 },
 }
 }


internal/blocksync/v2/reactor_test.go (+9 -9)

@@ -15,9 +15,9 @@ import (
 abciclient "github.com/tendermint/tendermint/abci/client"
 abci "github.com/tendermint/tendermint/abci/types"
-cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tendermint/config"
 "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
-cons "github.com/tendermint/tendermint/internal/consensus"
+"github.com/tendermint/tendermint/internal/consensus"
 "github.com/tendermint/tendermint/internal/mempool/mock"
 "github.com/tendermint/tendermint/internal/p2p"
 "github.com/tendermint/tendermint/internal/p2p/conn"
@@ -177,7 +177,7 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
 require.NoError(t, err)
 }
-r := newReactor(state, store, reporter, appl, true, cons.NopMetrics())
+r := newReactor(state, store, reporter, appl, true, consensus.NopMetrics())
 logger := log.TestingLogger()
 r.SetLogger(logger.With("module", "blockchain"))
@@ -365,9 +365,9 @@ func TestReactorHelperMode(t *testing.T) {
 channelID = byte(0x40)
 )
-config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
-defer os.RemoveAll(config.RootDir)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+cfg := config.ResetTestRoot("blockchain_reactor_v2_test")
+defer os.RemoveAll(cfg.RootDir)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 params := testReactorParams{
 logger: log.TestingLogger(),
@@ -455,9 +455,9 @@ func TestReactorHelperMode(t *testing.T) {
 }
 func TestReactorSetSwitchNil(t *testing.T) {
-config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
-defer os.RemoveAll(config.RootDir)
-genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
+cfg := config.ResetTestRoot("blockchain_reactor_v2_test")
+defer os.RemoveAll(cfg.RootDir)
+genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
 reactor := newTestReactor(t, testReactorParams{
 logger: log.TestingLogger(),


internal/consensus/byzantine_test.go (+2 -1)

@@ -10,6 +10,8 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
+dbm "github.com/tendermint/tm-db"
+
 abciclient "github.com/tendermint/tendermint/abci/client"
 abci "github.com/tendermint/tendermint/abci/types"
 "github.com/tendermint/tendermint/internal/evidence"
@@ -23,7 +25,6 @@ import (
 tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 "github.com/tendermint/tendermint/store"
 "github.com/tendermint/tendermint/types"
-dbm "github.com/tendermint/tm-db"
 )
 // Byzantine node sends two different prevotes (nil and blockID) to the same


+ 35
- 37
internal/consensus/common_test.go View File

@ -7,21 +7,19 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"path"
"path/filepath" "path/filepath"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"path"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client" abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/internal/consensus/types" cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmsync "github.com/tendermint/tendermint/internal/libs/sync"
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
@ -49,10 +47,10 @@ const (
// test. // test.
type cleanupFunc func() type cleanupFunc func()
func configSetup(t *testing.T) *cfg.Config {
func configSetup(t *testing.T) *config.Config {
t.Helper() t.Helper()
config := ResetConfig("consensus_reactor_test")
cfg := ResetConfig("consensus_reactor_test")
consensusReplayConfig := ResetConfig("consensus_replay_test") consensusReplayConfig := ResetConfig("consensus_replay_test")
configStateTest := ResetConfig("consensus_state_test") configStateTest := ResetConfig("consensus_state_test")
@ -60,13 +58,13 @@ func configSetup(t *testing.T) *cfg.Config {
configByzantineTest := ResetConfig("consensus_byzantine_test") configByzantineTest := ResetConfig("consensus_byzantine_test")
t.Cleanup(func() { t.Cleanup(func() {
os.RemoveAll(config.RootDir)
os.RemoveAll(cfg.RootDir)
os.RemoveAll(consensusReplayConfig.RootDir) os.RemoveAll(consensusReplayConfig.RootDir)
os.RemoveAll(configStateTest.RootDir) os.RemoveAll(configStateTest.RootDir)
os.RemoveAll(configMempoolTest.RootDir) os.RemoveAll(configMempoolTest.RootDir)
os.RemoveAll(configByzantineTest.RootDir) os.RemoveAll(configByzantineTest.RootDir)
}) })
return config
return cfg
} }
func ensureDir(dir string, mode os.FileMode) { func ensureDir(dir string, mode os.FileMode) {
@ -75,8 +73,8 @@ func ensureDir(dir string, mode os.FileMode) {
} }
} }
func ResetConfig(name string) *cfg.Config {
return cfg.ResetTestRoot(name)
func ResetConfig(name string) *config.Config {
return config.ResetTestRoot(name)
} }
//------------------------------------------------------------------------------- //-------------------------------------------------------------------------------
@ -102,7 +100,7 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
} }
func (vs *validatorStub) signVote( func (vs *validatorStub) signVote(
config *cfg.Config,
cfg *config.Config,
voteType tmproto.SignedMsgType, voteType tmproto.SignedMsgType,
hash []byte, hash []byte,
header types.PartSetHeader) (*types.Vote, error) { header types.PartSetHeader) (*types.Vote, error) {
@ -122,7 +120,7 @@ func (vs *validatorStub) signVote(
BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
} }
v := vote.ToProto() v := vote.ToProto()
if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil {
if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), v); err != nil {
return nil, fmt.Errorf("sign vote failed: %w", err) return nil, fmt.Errorf("sign vote failed: %w", err)
} }
@ -141,12 +139,12 @@ func (vs *validatorStub) signVote(
// Sign vote for type/hash/header // Sign vote for type/hash/header
func signVote( func signVote(
vs *validatorStub, vs *validatorStub,
config *cfg.Config,
cfg *config.Config,
voteType tmproto.SignedMsgType, voteType tmproto.SignedMsgType,
hash []byte, hash []byte,
header types.PartSetHeader) *types.Vote { header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(config, voteType, hash, header)
v, err := vs.signVote(cfg, voteType, hash, header)
if err != nil { if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err)) panic(fmt.Errorf("failed to sign vote: %v", err))
} }
@ -157,14 +155,14 @@ func signVote(
} }
func signVotes( func signVotes(
config *cfg.Config,
cfg *config.Config,
voteType tmproto.SignedMsgType, voteType tmproto.SignedMsgType,
hash []byte, hash []byte,
header types.PartSetHeader, header types.PartSetHeader,
vss ...*validatorStub) []*types.Vote { vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss)) votes := make([]*types.Vote, len(vss))
for i, vs := range vss { for i, vs := range vss {
votes[i] = signVote(vs, config, voteType, hash, header)
+ votes[i] = signVote(vs, cfg, voteType, hash, header)
  }
  return votes
  }
@@ -255,14 +253,14 @@ func addVotes(to *State, votes ...*types.Vote) {
  }
  func signAddVotes(
- config *cfg.Config,
+ cfg *config.Config,
  to *State,
  voteType tmproto.SignedMsgType,
  hash []byte,
  header types.PartSetHeader,
  vss ...*validatorStub,
  ) {
- votes := signVotes(config, voteType, hash, header, vss...)
+ votes := signVotes(cfg, voteType, hash, header, vss...)
  addVotes(to, votes...)
  }
@@ -387,12 +385,12 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
  // consensus states
  func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
- config := cfg.ResetTestRoot("consensus_state_test")
- return newStateWithConfig(config, state, pv, app)
+ cfg := config.ResetTestRoot("consensus_state_test")
+ return newStateWithConfig(cfg, state, pv, app)
  }
  func newStateWithConfig(
- thisConfig *cfg.Config,
+ thisConfig *config.Config,
  state sm.State,
  pv types.PrivValidator,
  app abci.Application,
@@ -402,7 +400,7 @@ func newStateWithConfig(
  }
  func newStateWithConfigAndBlockStore(
- thisConfig *cfg.Config,
+ thisConfig *config.Config,
  state sm.State,
  pv types.PrivValidator,
  app abci.Application,
@@ -444,10 +442,10 @@ func newStateWithConfigAndBlockStore(
  return cs
  }
- func loadPrivValidator(config *cfg.Config) *privval.FilePV {
- privValidatorKeyFile := config.PrivValidator.KeyFile()
+ func loadPrivValidator(cfg *config.Config) *privval.FilePV {
+ privValidatorKeyFile := cfg.PrivValidator.KeyFile()
  ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
- privValidatorStateFile := config.PrivValidator.StateFile()
+ privValidatorStateFile := cfg.PrivValidator.StateFile()
  privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
  if err != nil {
  panic(err)
@@ -456,9 +454,9 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV {
  return privValidator
  }
- func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) {
+ func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) {
  // Get State
- state, privVals := randGenesisState(config, nValidators, false, 10)
+ state, privVals := randGenesisState(cfg, nValidators, false, 10)
  vss := make([]*validatorStub, nValidators)
@@ -704,15 +702,15 @@ func consensusLogger() log.Logger {
  func randConsensusState(
  t *testing.T,
- config *cfg.Config,
+ cfg *config.Config,
  nValidators int,
  testName string,
  tickerFunc func() TimeoutTicker,
  appFunc func() abci.Application,
- configOpts ...func(*cfg.Config),
+ configOpts ...func(*config.Config),
  ) ([]*State, cleanupFunc) {
- genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
+ genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, 30)
  css := make([]*State, nValidators)
  logger := consensusLogger()
@@ -759,18 +757,18 @@ func randConsensusState(
  // nPeers = nValidators + nNotValidator
  func randConsensusNetWithPeers(
- config *cfg.Config,
+ cfg *config.Config,
  nValidators,
  nPeers int,
  testName string,
  tickerFunc func() TimeoutTicker,
  appFunc func(string) abci.Application,
- ) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
- genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, testMinPower)
+ ) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) {
+ genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, testMinPower)
  css := make([]*State, nPeers)
  logger := consensusLogger()
- var peer0Config *cfg.Config
+ var peer0Config *config.Config
  configRootDirs := make([]string, 0, nPeers)
  for i := 0; i < nPeers; i++ {
  state, _ := sm.MakeGenesisState(genDoc)
@@ -799,7 +797,7 @@ func randConsensusNetWithPeers(
  }
  }
- app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
+ app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
  vals := types.TM2PB.ValidatorUpdates(state.Validators)
  if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
  // simulate handshake, receive app version. If don't do this, replay test will fail
@@ -820,12 +818,12 @@ func randConsensusNetWithPeers(
  }
  func randGenesisState(
- config *cfg.Config,
+ cfg *config.Config,
  numValidators int,
  randPower bool,
  minPower int64) (sm.State, []types.PrivValidator) {
- genDoc, privValidators := factory.RandGenesisDoc(config, numValidators, randPower, minPower)
+ genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, randPower, minPower)
  s0, _ := sm.MakeGenesisState(genDoc)
  return s0, privValidators
  }
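The hunks above all apply the same renaming: the config package is imported under its real base name, and the short identifier cfg moves to the usage site (parameters and locals), so the package identifier is never shadowed. A minimal, self-contained Go sketch of that naming convention, using only the standard library (the settings type and run function are hypothetical stand-ins, not code from this repository):

    package main

    import (
    	"fmt"
    	"time" // imported under its base name; no alias needed
    )

    // settings stands in for a configuration struct (hypothetical).
    type settings struct {
    	timeout time.Duration
    }

    // run takes the value under the short local name cfg, leaving the
    // package identifier free for an un-aliased import such as the one above.
    func run(cfg settings) {
    	fmt.Println("timeout:", cfg.timeout)
    }

    func main() {
    	run(settings{timeout: 2 * time.Second})
    }

The same convention carries through the rest of the diff: one-off import aliases such as mempl and cryptoenc disappear, and call sites read mempool.TxInfo{} and encoding.PubKeyToProto(...) instead.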


+5 -6  internal/consensus/mempool_test.go

@@ -10,20 +10,19 @@ import (
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
  dbm "github.com/tendermint/tm-db"
  "github.com/tendermint/tendermint/abci/example/code"
  abci "github.com/tendermint/tendermint/abci/types"
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/internal/mempool"
  sm "github.com/tendermint/tendermint/internal/state"
  "github.com/tendermint/tendermint/store"
  "github.com/tendermint/tendermint/types"
  )
  // for testing
- func assertMempool(txn txNotifier) mempl.Mempool {
- return txn.(mempl.Mempool)
+ func assertMempool(txn txNotifier) mempool.Mempool {
+ return txn.(mempool.Mempool)
  }
  func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
@@ -113,7 +112,7 @@ func deliverTxsRange(cs *State, start, end int) {
  for i := start; i < end; i++ {
  txBytes := make([]byte, 8)
  binary.BigEndian.PutUint64(txBytes, uint64(i))
- err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempl.TxInfo{})
+ err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{})
  if err != nil {
  panic(fmt.Sprintf("Error after CheckTx: %v", err))
  }
@@ -179,7 +178,7 @@ func TestMempoolRmBadTx(t *testing.T) {
  return
  }
  checkTxRespCh <- struct{}{}
- }, mempl.TxInfo{})
+ }, mempool.TxInfo{})
  if err != nil {
  t.Errorf("error after CheckTx: %v", err)
  return


+22 -22  internal/consensus/reactor_test.go

@@ -12,12 +12,13 @@ import (
  "github.com/fortytw2/leaktest"
  "github.com/stretchr/testify/mock"
  "github.com/stretchr/testify/require"
+ dbm "github.com/tendermint/tm-db"
  abciclient "github.com/tendermint/tendermint/abci/client"
  "github.com/tendermint/tendermint/abci/example/kvstore"
  abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/crypto/encoding"
  tmsync "github.com/tendermint/tendermint/internal/libs/sync"
  "github.com/tendermint/tendermint/internal/mempool"
  mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
@@ -31,7 +32,6 @@ import (
  tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
  "github.com/tendermint/tendermint/store"
  "github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
  )
  var (
@@ -273,11 +273,11 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he
  }
  func TestReactorBasic(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  n := 4
  states, cleanup := randConsensusState(t,
- config, n, "consensus_reactor_test",
+ cfg, n, "consensus_reactor_test",
  newMockTickerFunc(true), newKVStore)
  t.Cleanup(cleanup)
@@ -316,14 +316,14 @@ func TestReactorBasic(t *testing.T) {
  }
  func TestReactorWithEvidence(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  n := 4
  testName := "consensus_reactor_test"
  tickerFunc := newMockTickerFunc(true)
  appFunc := newKVStore
- genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30)
+ genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30)
  states := make([]*State, n)
  logger := consensusLogger()
@@ -360,7 +360,7 @@ func TestReactorWithEvidence(t *testing.T) {
  // everyone includes evidence of another double signing
  vIdx := (i + 1) % n
- ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
+ ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID())
  evpool := &statemocks.EvidencePool{}
  evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
  evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
@@ -412,17 +412,17 @@ func TestReactorWithEvidence(t *testing.T) {
  }
  func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  n := 4
  states, cleanup := randConsensusState(
  t,
- config,
+ cfg,
  n,
  "consensus_reactor_test",
  newMockTickerFunc(true),
  newKVStore,
- func(c *cfg.Config) {
+ func(c *config.Config) {
  c.Consensus.CreateEmptyBlocks = false
  },
  )
@@ -462,11 +462,11 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
  }
  func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  n := 4
  states, cleanup := randConsensusState(t,
- config, n, "consensus_reactor_test",
+ cfg, n, "consensus_reactor_test",
  newMockTickerFunc(true), newKVStore)
  t.Cleanup(cleanup)
@@ -521,12 +521,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
  }
  func TestReactorVotingPowerChange(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  n := 4
  states, cleanup := randConsensusState(
  t,
- config,
+ cfg,
  n,
  "consensus_voting_power_changes_test",
  newMockTickerFunc(true),
@@ -573,7 +573,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
  val1PubKey, err := states[0].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
+ val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey)
  require.NoError(t, err)
  updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
@@ -622,12 +622,12 @@ func TestReactorVotingPowerChange(t *testing.T) {
  }
  func TestReactorValidatorSetChanges(t *testing.T) {
- config := configSetup(t)
+ cfg := configSetup(t)
  nPeers := 7
  nVals := 4
  states, _, _, cleanup := randConsensusNetWithPeers(
- config,
+ cfg,
  nVals,
  nPeers,
  "consensus_val_set_changes_test",
@@ -668,7 +668,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
  newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
+ valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
  require.NoError(t, err)
  newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
@@ -701,7 +701,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
  updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
+ updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
  require.NoError(t, err)
  updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
@@ -721,7 +721,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
  newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
+ newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
  require.NoError(t, err)
  newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
@@ -729,7 +729,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
  newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
+ newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
  require.NoError(t, err)
  newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)


+9 -9  internal/consensus/replay_file.go

@@ -12,7 +12,7 @@ import (
  dbm "github.com/tendermint/tm-db"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/proxy"
  sm "github.com/tendermint/tendermint/internal/state"
  "github.com/tendermint/tendermint/libs/log"
@@ -31,8 +31,8 @@ const (
  // replay messages interactively or all at once
  // replay the wal file
- func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
- consensusState := newConsensusStateForReplay(config, csConfig)
+ func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) {
+ consensusState := newConsensusStateForReplay(cfg, csConfig)
  if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
  tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
@@ -286,22 +286,22 @@ func (pb *playback) replayConsoleLoop() int {
  //--------------------------------------------------------------------------------
  // convenience for replay mode
- func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State {
- dbType := dbm.BackendType(config.DBBackend)
+ func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State {
+ dbType := dbm.BackendType(cfg.DBBackend)
  // Get BlockStore
- blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
+ blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
  if err != nil {
  tmos.Exit(err.Error())
  }
  blockStore := store.NewBlockStore(blockStoreDB)
  // Get State
- stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
+ stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
  if err != nil {
  tmos.Exit(err.Error())
  }
  stateStore := sm.NewStore(stateDB)
- gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
+ gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
  if err != nil {
  tmos.Exit(err.Error())
  }
@@ -311,7 +311,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
  }
  // Create proxyAppConn connection (consensus, mempool, query)
- clientCreator, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
+ clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
  proxyApp := proxy.NewAppConns(clientCreator)
  err = proxyApp.Start()
  if err != nil {


+5 -5  internal/consensus/replay_stubs.go

@@ -6,7 +6,7 @@ import (
  abciclient "github.com/tendermint/tendermint/abci/client"
  abci "github.com/tendermint/tendermint/abci/types"
  "github.com/tendermint/tendermint/internal/libs/clist"
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/internal/proxy"
  tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
  "github.com/tendermint/tendermint/types"
@@ -16,12 +16,12 @@ import (
  type emptyMempool struct{}
- var _ mempl.Mempool = emptyMempool{}
+ var _ mempool.Mempool = emptyMempool{}
  func (emptyMempool) Lock() {}
  func (emptyMempool) Unlock() {}
  func (emptyMempool) Size() int { return 0 }
- func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
+ func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
  return nil
  }
  func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
@@ -30,8 +30,8 @@ func (emptyMempool) Update(
  _ int64,
  _ types.Txs,
  _ []*abci.ResponseDeliverTx,
- _ mempl.PreCheckFunc,
- _ mempl.PostCheckFunc,
+ _ mempool.PreCheckFunc,
+ _ mempool.PostCheckFunc,
  ) error {
  return nil
  }


+58 -58  internal/consensus/replay_test.go

@@ -22,10 +22,10 @@ import (
  abciclient "github.com/tendermint/tendermint/abci/client"
  "github.com/tendermint/tendermint/abci/example/kvstore"
  abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/crypto"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/internal/proxy"
  sm "github.com/tendermint/tendermint/internal/state"
  sf "github.com/tendermint/tendermint/internal/state/test/factory"
@@ -54,7 +54,7 @@ import (
  // and which ones we need the wal for - then we'd also be able to only flush the
  // wal writer when we need to, instead of with every message.
- func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
+ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config,
  lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
  logger := log.TestingLogger()
  state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
@@ -103,7 +103,7 @@ func sendTxs(ctx context.Context, cs *State) {
  return
  default:
  tx := []byte{byte(i)}
- if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil {
+ if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil {
  panic(err)
  }
  i++
@@ -137,7 +137,7 @@ func TestWALCrash(t *testing.T) {
  }
  }
- func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
+ func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config,
  initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
  walPanicked := make(chan error)
  crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
@@ -286,12 +286,12 @@ func (w *crashingWAL) Wait() { w.next.Wait() }
  //------------------------------------------------------------------------------------------
  type simulatorTestSuite struct {
  GenesisState sm.State
- Config *cfg.Config
+ Config *config.Config
  Chain []*types.Block
  Commits []*types.Commit
  CleanupFunc cleanupFunc
- Mempool mempl.Mempool
+ Mempool mempool.Mempool
  Evpool sm.EvidencePool
  }
@@ -311,7 +311,7 @@ var modes = []uint{0, 1, 2, 3}
  // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
  func setupSimulator(t *testing.T) *simulatorTestSuite {
  t.Helper()
- config := configSetup(t)
+ cfg := configSetup(t)
  sim := &simulatorTestSuite{
  Mempool: emptyMempool{},
@@ -321,14 +321,14 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  nPeers := 7
  nVals := 4
- css, genDoc, config, cleanup := randConsensusNetWithPeers(
- config,
+ css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
+ cfg,
  nVals,
  nPeers,
  "replay_test",
  newMockTickerFunc(true),
  newPersistentKVStoreWithPath)
- sim.Config = config
+ sim.Config = cfg
  sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
  sim.CleanupFunc = cleanup
@@ -361,10 +361,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  incrementHeight(vss...)
  newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
+ valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
  require.NoError(t, err)
  newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
  propBlockParts := propBlock.MakePartSet(partSize)
@@ -372,7 +372,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
  p := proposal.ToProto()
- if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+ if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
  t.Fatal("failed to sign bad proposal", err)
  }
  proposal.Signature = p.Signature
@@ -393,10 +393,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  incrementHeight(vss...)
  updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
+ updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
  require.NoError(t, err)
  updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
  propBlockParts = propBlock.MakePartSet(partSize)
@@ -404,7 +404,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
  p = proposal.ToProto()
- if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+ if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
  t.Fatal("failed to sign bad proposal", err)
  }
  proposal.Signature = p.Signature
@@ -425,17 +425,17 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  incrementHeight(vss...)
  newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
+ newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
  require.NoError(t, err)
  newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background())
  require.NoError(t, err)
- newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
+ newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
  require.NoError(t, err)
  newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
  propBlockParts = propBlock.MakePartSet(partSize)
@@ -463,7 +463,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
  p = proposal.ToProto()
- if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+ if err := vss[3].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
  t.Fatal("failed to sign bad proposal", err)
  }
  proposal.Signature = p.Signature
@@ -475,7 +475,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  ensureNewProposal(proposalCh, height, round)
  removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  rs = css[0].GetRoundState()
@@ -514,7 +514,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  height++
  incrementHeight(vss...)
  removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempl.TxInfo{})
+ err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{})
  assert.Nil(t, err)
  propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
  propBlockParts = propBlock.MakePartSet(partSize)
@@ -526,7 +526,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
  selfIndex = valIndexFn(0)
  proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
  p = proposal.ToProto()
- if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+ if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
  t.Fatal("failed to sign bad proposal", err)
  }
  proposal.Signature = p.Signature
@@ -611,8 +611,8 @@ func TestHandshakeReplayNone(t *testing.T) {
  // Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx
  func TestMockProxyApp(t *testing.T) {
  sim := setupSimulator(t) // setup config and simulator
- config := sim.Config
- assert.NotNil(t, config)
+ cfg := sim.Config
+ assert.NotNil(t, cfg)
  logger := log.TestingLogger()
  var validTxs, invalidTxs = 0, 0
@@ -687,7 +687,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  var stateDB dbm.DB
  var genesisState sm.State
- config := sim.Config
+ cfg := sim.Config
  if testValidatorsChange {
  testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
@@ -695,19 +695,19 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  stateDB = dbm.NewMemDB()
  genesisState = sim.GenesisState
- config = sim.Config
+ cfg = sim.Config
  chain = append([]*types.Block{}, sim.Chain...) // copy chain
  commits = sim.Commits
- store = newMockBlockStore(config, genesisState.ConsensusParams)
+ store = newMockBlockStore(cfg, genesisState.ConsensusParams)
  } else { // test single node
  testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
  defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
  walBody, err := WALWithNBlocks(t, numBlocks)
  require.NoError(t, err)
  walFile := tempWALWithData(walBody)
- config.Consensus.SetWalFile(walFile)
+ cfg.Consensus.SetWalFile(walFile)
- privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
+ privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
  require.NoError(t, err)
  wal, err := NewWAL(walFile)
@@ -724,7 +724,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  require.NoError(t, err)
  pubKey, err := privVal.GetPubKey(context.Background())
  require.NoError(t, err)
- stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
+ stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion)
  }
  stateStore := sm.NewStore(stateDB)
@@ -733,12 +733,12 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  state := genesisState.Copy()
  // run the chain through state.ApplyBlock to build up the tendermint state
- state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store)
+ state = buildTMStateFromChain(cfg, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store)
  latestAppHash := state.AppHash
  // make a new client creator
  kvstoreApp := kvstore.NewPersistentKVStoreApplication(
- filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
+ filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
  t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) })
  clientCreator2 := abciclient.NewLocalCreator(kvstoreApp)
@@ -763,7 +763,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  }
  // now start the app using the handshake - it should sync
- genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
+ genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
  handshaker := NewHandshaker(stateStore, state, store, genDoc)
  proxyApp := proxy.NewAppConns(clientCreator2)
  if err := proxyApp.Start(); err != nil {
@@ -811,7 +811,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
  }
  func applyBlock(stateStore sm.Store,
- mempool mempl.Mempool,
+ mempool mempool.Mempool,
  evpool sm.EvidencePool,
  st sm.State,
  blk *types.Block,
@@ -831,7 +831,7 @@ func applyBlock(stateStore sm.Store,
  func buildAppStateFromChain(
  proxyApp proxy.AppConns,
  stateStore sm.Store,
- mempool mempl.Mempool,
+ mempool mempool.Mempool,
  evpool sm.EvidencePool,
  state sm.State,
  chain []*types.Block,
@@ -878,8 +878,8 @@ func buildAppStateFromChain(
  }
  func buildTMStateFromChain(
- config *cfg.Config,
- mempool mempl.Mempool,
+ cfg *config.Config,
+ mempool mempool.Mempool,
  evpool sm.EvidencePool,
  stateStore sm.Store,
  state sm.State,
@@ -889,7 +889,7 @@ func buildTMStateFromChain(
  blockStore *mockBlockStore) sm.State {
  // run the whole chain against this client to build up the tendermint state
  kvstoreApp := kvstore.NewPersistentKVStoreApplication(
- filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
+ filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
  defer kvstoreApp.Close()
  clientCreator := abciclient.NewLocalCreator(kvstoreApp)
@@ -938,16 +938,16 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
  // - 0x01
  // - 0x02
  // - 0x03
- config := ResetConfig("handshake_test_")
- t.Cleanup(func() { os.RemoveAll(config.RootDir) })
- privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
+ cfg := ResetConfig("handshake_test_")
+ t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
+ privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
  require.NoError(t, err)
  const appVersion = 0x0
  pubKey, err := privVal.GetPubKey(context.Background())
  require.NoError(t, err)
- stateDB, state, store := stateAndStore(config, pubKey, appVersion)
+ stateDB, state, store := stateAndStore(cfg, pubKey, appVersion)
  stateStore := sm.NewStore(stateDB)
- genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
+ genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
  state.LastValidators = state.Validators.Copy()
  // mode = 0 for committing all the blocks
  blocks := sf.MakeBlocks(3, &state, privVal)
@@ -1153,14 +1153,14 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
  // fresh state and mock store
  func stateAndStore(
- config *cfg.Config,
+ cfg *config.Config,
  pubKey crypto.PubKey,
  appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
  stateDB := dbm.NewMemDB()
  stateStore := sm.NewStore(stateDB)
- state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
+ state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
  state.Version.Consensus.App = appVersion
- store := newMockBlockStore(config, state.ConsensusParams)
+ store := newMockBlockStore(cfg, state.ConsensusParams)
  if err := stateStore.Save(state); err != nil {
  panic(err)
  }
@@ -1171,7 +1171,7 @@ func stateAndStore(
  // mock block store
  type mockBlockStore struct {
- config *cfg.Config
+ cfg *config.Config
  params types.ConsensusParams
  chain []*types.Block
  commits []*types.Commit
@@ -1179,8 +1179,8 @@ type mockBlockStore struct {
  }
  // TODO: NewBlockStore(db.NewMemDB) ...
- func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
- return &mockBlockStore{config, params, nil, nil, 0}
+ func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
+ return &mockBlockStore{cfg, params, nil, nil, 0}
  }
  func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) }
@@ -1228,20 +1228,20 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
  app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
  clientCreator := abciclient.NewLocalCreator(app)
- config := ResetConfig("handshake_test_")
- t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
+ cfg := ResetConfig("handshake_test_")
+ t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) })
- privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
+ privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
  require.NoError(t, err)
  pubKey, err := privVal.GetPubKey(context.Background())
  require.NoError(t, err)
- stateDB, state, store := stateAndStore(config, pubKey, 0x0)
+ stateDB, state, store := stateAndStore(cfg, pubKey, 0x0)
  stateStore := sm.NewStore(stateDB)
  oldValAddr := state.Validators.Validators[0].Address
  // now start the app using the handshake - it should sync
- genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
+ genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
  handshaker := NewHandshaker(stateStore, state, store, genDoc)
  proxyApp := proxy.NewAppConns(clientCreator)
  if err := proxyApp.Start(); err != nil {


+4 -4  internal/consensus/state.go

@@ -12,7 +12,7 @@ import (
  "github.com/gogo/protobuf/proto"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/crypto"
  cstypes "github.com/tendermint/tendermint/internal/consensus/types"
  "github.com/tendermint/tendermint/internal/libs/fail"
@@ -80,7 +80,7 @@ type State struct {
  service.BaseService
  // config details
- config *cfg.ConsensusConfig
+ config *config.ConsensusConfig
  privValidator types.PrivValidator // for signing votes
  privValidatorType types.PrivValidatorType
@@ -152,7 +152,7 @@ type StateOption func(*State)
  // NewState returns a new State.
  func NewState(
- config *cfg.ConsensusConfig,
+ cfg *config.ConsensusConfig,
  state sm.State,
  blockExec *sm.BlockExecutor,
  blockStore sm.BlockStore,
@@ -161,7 +161,7 @@ func NewState(
  options ...StateOption,
  ) *State {
  cs := &State{
- config: config,
+ config: cfg,
  blockExec: blockExec,
  blockStore: blockStore,
  txNotifier: txNotifier,


+6 -6  internal/consensus/types/height_vote_set_test.go

@@ -6,7 +6,7 @@ import (
  "os"
  "testing"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/crypto/tmhash"
  "github.com/tendermint/tendermint/internal/test/factory"
  tmrand "github.com/tendermint/tendermint/libs/rand"
@@ -15,19 +15,19 @@ import (
  "github.com/tendermint/tendermint/types"
  )
- var config *cfg.Config // NOTE: must be reset for each _test.go file
+ var cfg *config.Config // NOTE: must be reset for each _test.go file
  func TestMain(m *testing.M) {
- config = cfg.ResetTestRoot("consensus_height_vote_set_test")
+ cfg = config.ResetTestRoot("consensus_height_vote_set_test")
  code := m.Run()
- os.RemoveAll(config.RootDir)
+ os.RemoveAll(cfg.RootDir)
  os.Exit(code)
  }
  func TestPeerCatchupRounds(t *testing.T) {
  valSet, privVals := factory.RandValidatorSet(10, 1)
- hvs := NewHeightVoteSet(config.ChainID(), 1, valSet)
+ hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet)
  vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
  added, err := hvs.AddVote(vote999_0, "peer1")
@@ -75,7 +75,7 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty
  Type: tmproto.PrecommitType,
  BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}},
  }
- chainID := config.ChainID()
+ chainID := cfg.ChainID()
  v := vote.ToProto()
  err = privVal.SignVote(context.Background(), chainID, v)


+11 -11  internal/consensus/wal_generator.go

@@ -11,11 +11,11 @@ import (
  "time"
  "github.com/stretchr/testify/require"
- db "github.com/tendermint/tm-db"
+ dbm "github.com/tendermint/tm-db"
  abciclient "github.com/tendermint/tendermint/abci/client"
  "github.com/tendermint/tendermint/abci/example/kvstore"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/proxy"
  sm "github.com/tendermint/tendermint/internal/state"
  "github.com/tendermint/tendermint/libs/log"
@@ -30,9 +30,9 @@ import (
  // (byteBufferWAL) and waits until numBlocks are created.
  // If the node fails to produce given numBlocks, it returns an error.
  func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
- config := getConfig(t)
+ cfg := getConfig(t)
- app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
+ app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator"))
  t.Cleanup(func() { require.NoError(t, app.Close()) })
  logger := log.TestingLogger().With("wal_generator", "wal_generator")
@@ -41,17 +41,17 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
  // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
  // NOTE: we can't import node package because of circular dependency.
  // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly.
- privValidatorKeyFile := config.PrivValidator.KeyFile()
- privValidatorStateFile := config.PrivValidator.StateFile()
+ privValidatorKeyFile := cfg.PrivValidator.KeyFile()
+ privValidatorStateFile := cfg.PrivValidator.StateFile()
  privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
  if err != nil {
  return err
  }
- genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
+ genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
  if err != nil {
  return fmt.Errorf("failed to read genesis file: %w", err)
  }
- blockStoreDB := db.NewMemDB()
+ blockStoreDB := dbm.NewMemDB()
  stateDB := blockStoreDB
  stateStore := sm.NewStore(stateDB)
  state, err := sm.MakeGenesisState(genDoc)
@@ -89,7 +89,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
  mempool := emptyMempool{}
  evpool := sm.EmptyEvidencePool{}
  blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
- consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
+ consensusState := NewState(cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
  consensusState.SetLogger(logger)
  consensusState.SetEventBus(eventBus)
  if privValidator != nil && privValidator != (*privval.FilePV)(nil) {
@@ -153,8 +153,8 @@ func makeAddrs() (string, string, string) {
  }
  // getConfig returns a config for test cases
- func getConfig(t *testing.T) *cfg.Config {
- c := cfg.ResetTestRoot(t.Name())
+ func getConfig(t *testing.T) *config.Config {
+ c := config.ResetTestRoot(t.Name())
  // and we use random ports to run in parallel
  tm, rpc, grpc := makeAddrs()
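The db → dbm change above normalizes the alias used for the github.com/tendermint/tm-db import so it matches the other files touched in this diff. A small, runnable sketch of the normalized alias in isolation (it assumes only that the tm-db module is available in go.mod; the key and value are illustrative):

    package main

    import (
    	"fmt"

    	dbm "github.com/tendermint/tm-db" // normalized alias: dbm, not db
    )

    func main() {
    	// In-memory backend, the same kind of store the WAL generator above uses.
    	db := dbm.NewMemDB()
    	if err := db.Set([]byte("height"), []byte("1")); err != nil {
    		panic(err)
    	}
    	value, err := db.Get([]byte("height"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("height=%s\n", value)
    }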


+0 -1  internal/evidence/verify_test.go

@@ -8,7 +8,6 @@ import (
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
  dbm "github.com/tendermint/tm-db"
  "github.com/tendermint/tendermint/crypto"


+5 -5  internal/mempool/mock/mempool.go

@@ -5,19 +5,19 @@ import (
  abci "github.com/tendermint/tendermint/abci/types"
  "github.com/tendermint/tendermint/internal/libs/clist"
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/types"
  )
  // Mempool is an empty implementation of a Mempool, useful for testing.
  type Mempool struct{}
- var _ mempl.Mempool = Mempool{}
+ var _ mempool.Mempool = Mempool{}
  func (Mempool) Lock() {}
  func (Mempool) Unlock() {}
  func (Mempool) Size() int { return 0 }
- func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
+ func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
  return nil
  }
  func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
@@ -26,8 +26,8 @@ func (Mempool) Update(
  _ int64,
  _ types.Txs,
  _ []*abci.ResponseDeliverTx,
- _ mempl.PreCheckFunc,
- _ mempl.PostCheckFunc,
+ _ mempool.PreCheckFunc,
+ _ mempool.PostCheckFunc,
  ) error {
  return nil
  }


+6 -6  internal/mempool/v0/clist_mempool.go

@@ -8,7 +8,7 @@ import (
  "sync/atomic"
  abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/libs/clist"
  tmsync "github.com/tendermint/tendermint/internal/libs/sync"
  "github.com/tendermint/tendermint/internal/mempool"
@@ -32,7 +32,7 @@ type CListMempool struct {
  notifiedTxsAvailable bool
  txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
- config *cfg.MempoolConfig
+ config *config.MempoolConfig
  // Exclusive mutex for Update method to prevent concurrent execution of
  // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
@@ -69,14 +69,14 @@ type CListMempoolOption func(*CListMempool)
  // NewCListMempool returns a new mempool with the given configuration and
  // connection to an application.
  func NewCListMempool(
- config *cfg.MempoolConfig,
+ cfg *config.MempoolConfig,
  proxyAppConn proxy.AppConnMempool,
  height int64,
  options ...CListMempoolOption,
  ) *CListMempool {
  mp := &CListMempool{
- config: config,
+ config: cfg,
  proxyAppConn: proxyAppConn,
  txs: clist.New(),
  height: height,
@@ -86,8 +86,8 @@ func NewCListMempool(
  metrics: mempool.NopMetrics(),
  }
- if config.CacheSize > 0 {
- mp.cache = mempool.NewLRUTxCache(config.CacheSize)
+ if cfg.CacheSize > 0 {
+ mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
  } else {
  mp.cache = mempool.NopTxCache{}
  }


+12 -12  internal/mempool/v0/clist_mempool_test.go

@@ -19,7 +19,7 @@ import (
  "github.com/tendermint/tendermint/abci/example/kvstore"
  abciserver "github.com/tendermint/tendermint/abci/server"
  abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/libs/log"
  tmrand "github.com/tendermint/tendermint/libs/rand"
@@ -32,10 +32,10 @@ import (
  type cleanupFunc func()
  func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) {
- return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
+ return newMempoolWithAppAndConfig(cc, config.ResetTestRoot("mempool_test"))
  }
- func newMempoolWithAppAndConfig(cc abciclient.Creator, config *cfg.Config) (*CListMempool, cleanupFunc) {
+ func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) {
  appConnMem, _ := cc()
  appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
  err := appConnMem.Start()
@@ -43,10 +43,10 @@ func newMempoolWithAppAndConfig(cc abciclient.Creator, config *cfg.Config) (*CLi
  panic(err)
  }
- mp := NewCListMempool(config.Mempool, appConnMem, 0)
+ mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
  mp.SetLogger(log.TestingLogger())
- return mp, func() { os.RemoveAll(config.RootDir) }
+ return mp, func() { os.RemoveAll(cfg.RootDir) }
  }
  func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@@ -217,7 +217,7 @@ func TestMempoolUpdate(t *testing.T) {
  func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
  app := kvstore.NewApplication()
  cc := abciclient.NewLocalCreator(app)
- wcfg := cfg.DefaultConfig()
+ wcfg := config.DefaultConfig()
  wcfg.Mempool.KeepInvalidTxsInCache = true
  mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
  defer cleanup()
@@ -465,9 +465,9 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
  func TestMempoolTxsBytes(t *testing.T) {
  app := kvstore.NewApplication()
  cc := abciclient.NewLocalCreator(app)
- config := cfg.ResetTestRoot("mempool_test")
- config.Mempool.MaxTxsBytes = 10
- mp, cleanup := newMempoolWithAppAndConfig(cc, config)
+ cfg := config.ResetTestRoot("mempool_test")
+ cfg.Mempool.MaxTxsBytes = 10
+ mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
  defer cleanup()
  // 1. zero by default
@@ -564,8 +564,8 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
  t.Error(err)
  }
  })
- config := cfg.ResetTestRoot("mempool_test")
- mp, cleanup := newMempoolWithAppAndConfig(cc, config)
+ cfg := config.ResetTestRoot("mempool_test")
+ mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
  defer cleanup()
  // generate small number of txs
@@ -577,7 +577,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
  }
  // simulate a group of peers sending them over and over
- N := config.Mempool.Size
+ N := cfg.Mempool.Size
  maxPeers := 5
  for i := 0; i < N; i++ {
  peerID := mrand.Intn(maxPeers)


internal/mempool/v0/reactor.go (+8 -8)

@@ -8,7 +8,7 @@ import (
  "sync"
  "time"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/libs/clist"
  tmsync "github.com/tendermint/tendermint/internal/libs/sync"
  "github.com/tendermint/tendermint/internal/mempool"
@@ -37,7 +37,7 @@ type PeerManager interface {
  type Reactor struct {
  service.BaseService
- config *cfg.MempoolConfig
+ cfg *config.MempoolConfig
  mempool *CListMempool
  ids *mempool.MempoolIDs
@@ -61,7 +61,7 @@ type Reactor struct {
  // NewReactor returns a reference to a new reactor.
  func NewReactor(
  logger log.Logger,
- config *cfg.MempoolConfig,
+ cfg *config.MempoolConfig,
  peerMgr PeerManager,
  mp *CListMempool,
  mempoolCh *p2p.Channel,
@@ -69,7 +69,7 @@ func NewReactor(
  ) *Reactor {
  r := &Reactor{
- config: config,
+ cfg: cfg,
  peerMgr: peerMgr,
  mempool: mp,
  ids: mempool.NewMempoolIDs(),
@@ -90,8 +90,8 @@ func NewReactor(
  //
  // TODO: Remove once p2p refactor is complete.
  // ref: https://github.com/tendermint/tendermint/issues/5670
- func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim {
- largestTx := make([]byte, config.MaxTxBytes)
+ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim {
+ largestTx := make([]byte, cfg.MaxTxBytes)
  batchMsg := protomem.Message{
  Sum: &protomem.Message_Txs{
  Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
@@ -117,7 +117,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe
  // messages on that p2p channel accordingly. The caller must be sure to execute
  // OnStop to ensure the outbound p2p Channels are closed.
  func (r *Reactor) OnStart() error {
- if !r.config.Broadcast {
+ if !r.cfg.Broadcast {
  r.Logger.Info("tx broadcasting is disabled")
  }
@@ -254,7 +254,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
  return
  }
- if r.config.Broadcast {
+ if r.cfg.Broadcast {
  // Check if we've already started a goroutine for this peer, if not we create
  // a new done channel so we can explicitly close the goroutine if the peer
  // is later removed, we increment the waitgroup so the reactor can stop


internal/mempool/v0/reactor_test.go (+17 -17)

@@ -11,7 +11,7 @@ import (
  abciclient "github.com/tendermint/tendermint/abci/client"
  "github.com/tendermint/tendermint/abci/example/kvstore"
  abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/internal/p2p"
  "github.com/tendermint/tendermint/internal/p2p/p2ptest"
@@ -36,7 +36,7 @@ type reactorTestSuite struct {
  nodes []types.NodeID
  }
- func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite {
+ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite {
  t.Helper()
  rts := &reactorTestSuite{
@@ -68,7 +68,7 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac
  rts.reactors[nodeID] = NewReactor(
  rts.logger.With("nodeID", nodeID),
- cfg,
+ config,
  rts.network.Nodes[nodeID].PeerManager,
  mempool,
  rts.mempoolChnnels[nodeID],
@@ -158,9 +158,9 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...typ
  func TestReactorBroadcastTxs(t *testing.T) {
  numTxs := 1000
  numNodes := 10
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
- rts := setup(t, config.Mempool, numNodes, 0)
+ rts := setup(t, cfg.Mempool, numNodes, 0)
  primary := rts.nodes[0]
  secondaries := rts.nodes[1:]
@@ -185,9 +185,9 @@ func TestReactorBroadcastTxs(t *testing.T) {
  func TestReactorConcurrency(t *testing.T) {
  numTxs := 5
  numNodes := 2
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
- rts := setup(t, config.Mempool, numNodes, 0)
+ rts := setup(t, cfg.Mempool, numNodes, 0)
  primary := rts.nodes[0]
  secondary := rts.nodes[1]
@@ -244,9 +244,9 @@ func TestReactorConcurrency(t *testing.T) {
  func TestReactorNoBroadcastToSender(t *testing.T) {
  numTxs := 1000
  numNodes := 2
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
- rts := setup(t, config.Mempool, numNodes, uint(numTxs))
+ rts := setup(t, cfg.Mempool, numNodes, uint(numTxs))
  primary := rts.nodes[0]
  secondary := rts.nodes[1]
@@ -267,16 +267,16 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
  func TestReactor_MaxTxBytes(t *testing.T) {
  numNodes := 2
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
- rts := setup(t, config.Mempool, numNodes, 0)
+ rts := setup(t, cfg.Mempool, numNodes, 0)
  primary := rts.nodes[0]
  secondary := rts.nodes[1]
  // Broadcast a tx, which has the max size and ensure it's received by the
  // second reactor.
- tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
+ tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes)
  err := rts.reactors[primary].mempool.CheckTx(
  context.Background(),
  tx1,
@@ -297,7 +297,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {
  rts.reactors[secondary].mempool.Flush()
  // broadcast a tx, which is beyond the max size and ensure it's not sent
- tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
+ tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1)
  err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
  require.Error(t, err)
@@ -305,11 +305,11 @@ func TestReactor_MaxTxBytes(t *testing.T) {
  }
  func TestDontExhaustMaxActiveIDs(t *testing.T) {
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
  // we're creating a single node network, but not starting the
  // network.
- rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1)
+ rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1)
  nodeID := rts.nodes[0]
@@ -374,9 +374,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
  t.Skip("skipping test in short mode")
  }
- config := cfg.TestConfig()
+ cfg := config.TestConfig()
- rts := setup(t, config.Mempool, 2, 0)
+ rts := setup(t, cfg.Mempool, 2, 0)
  primary := rts.nodes[0]
  secondary := rts.nodes[1]


internal/mempool/v1/reactor.go (+8 -8)

@@ -8,7 +8,7 @@ import (
  "sync"
  "time"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/internal/libs/clist"
  tmsync "github.com/tendermint/tendermint/internal/libs/sync"
  "github.com/tendermint/tendermint/internal/mempool"
@@ -37,7 +37,7 @@ type PeerManager interface {
  type Reactor struct {
  service.BaseService
- config *cfg.MempoolConfig
+ cfg *config.MempoolConfig
  mempool *TxMempool
  ids *mempool.MempoolIDs
@@ -65,7 +65,7 @@ type Reactor struct {
  // NewReactor returns a reference to a new reactor.
  func NewReactor(
  logger log.Logger,
- config *cfg.MempoolConfig,
+ cfg *config.MempoolConfig,
  peerMgr PeerManager,
  txmp *TxMempool,
  mempoolCh *p2p.Channel,
@@ -73,7 +73,7 @@ func NewReactor(
  ) *Reactor {
  r := &Reactor{
- config: config,
+ cfg: cfg,
  peerMgr: peerMgr,
  mempool: txmp,
  ids: mempool.NewMempoolIDs(),
@@ -97,8 +97,8 @@ func defaultObservePanic(r interface{}) {}
  //
  // TODO: Remove once p2p refactor is complete.
  // ref: https://github.com/tendermint/tendermint/issues/5670
- func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim {
- largestTx := make([]byte, config.MaxTxBytes)
+ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim {
+ largestTx := make([]byte, cfg.MaxTxBytes)
  batchMsg := protomem.Message{
  Sum: &protomem.Message_Txs{
  Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
@@ -124,7 +124,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe
  // messages on that p2p channel accordingly. The caller must be sure to execute
  // OnStop to ensure the outbound p2p Channels are closed.
  func (r *Reactor) OnStart() error {
- if !r.config.Broadcast {
+ if !r.cfg.Broadcast {
  r.Logger.Info("tx broadcasting is disabled")
  }
@@ -262,7 +262,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
  return
  }
- if r.config.Broadcast {
+ if r.cfg.Broadcast {
  // Check if we've already started a goroutine for this peer, if not we create
  // a new done channel so we can explicitly close the goroutine if the peer
  // is later removed, we increment the waitgroup so the reactor can stop


internal/p2p/conn/evil_secret_connection_test.go (+2 -2)

@@ -13,7 +13,7 @@ import (
  "github.com/tendermint/tendermint/crypto"
  "github.com/tendermint/tendermint/crypto/ed25519"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
  "github.com/tendermint/tendermint/internal/libs/protoio"
  tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
  )
@@ -113,7 +113,7 @@ func (c *evilConn) Read(data []byte) (n int, err error) {
  case 1:
  signature := c.signChallenge()
  if !c.badAuthSignature {
- pkpb, err := cryptoenc.PubKeyToProto(c.privKey.PubKey())
+ pkpb, err := encoding.PubKeyToProto(c.privKey.PubKey())
  if err != nil {
  panic(err)
  }


internal/p2p/conn/secret_connection.go (+3 -3)

@@ -23,7 +23,7 @@ import (
  "github.com/tendermint/tendermint/crypto"
  "github.com/tendermint/tendermint/crypto/ed25519"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
  "github.com/tendermint/tendermint/internal/libs/protoio"
  tmsync "github.com/tendermint/tendermint/internal/libs/sync"
  "github.com/tendermint/tendermint/libs/async"
@@ -406,7 +406,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
  // Send our info and receive theirs in tandem.
  var trs, _ = async.Parallel(
  func(_ int) (val interface{}, abort bool, err error) {
- pbpk, err := cryptoenc.PubKeyToProto(pubKey)
+ pbpk, err := encoding.PubKeyToProto(pubKey)
  if err != nil {
  return nil, true, err
  }
@@ -423,7 +423,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
  return nil, true, err // abort
  }
- pk, err := cryptoenc.PubKeyFromProto(pba.PubKey)
+ pk, err := encoding.PubKeyFromProto(pba.PubKey)
  if err != nil {
  return nil, true, err // abort
  }
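Note: here the cryptoenc alias is dropped because encoding is the only import with that base name in the file, so call sites can simply read encoding.PubKeyToProto. A minimal sketch of the same rule using a standard-library stand-in (encoding/json); this is illustrative code, not part of the change:

package main

import (
	"encoding/json" // base name "json" is unambiguous here, so no alias is needed
	"fmt"
)

func main() {
	// The call site reads naturally under the package's own name,
	// just as encoding.PubKeyToProto does once cryptoenc is gone.
	out, err := json.Marshal(map[string]int64{"height": 100})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}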


internal/p2p/peermanager_scoring_test.go (+2 -1)

@@ -6,9 +6,10 @@ import (
  "time"
  "github.com/stretchr/testify/require"
+ dbm "github.com/tendermint/tm-db"
  "github.com/tendermint/tendermint/crypto/ed25519"
  "github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
  )
  func TestPeerScoring(t *testing.T) {
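Note: the hunk above regroups the dbm import with the other external dependency (testify) instead of leaving it below the repository's own packages. A minimal sketch of the grouping convention this follows (standard library, then external dependencies, then repository packages, each group separated by a blank line), saved as a _test.go file; the test body exists only so every import is used, and it assumes a module that already depends on testify and tm-db:

package main

import (
	// standard library
	"testing"
	"time"

	// external dependencies
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db" // dbm alias kept: the package's own name is the very short "db"

	// ...repository-local imports would form a third group here.
)

func TestGroupedImports(t *testing.T) {
	db := dbm.NewMemDB() // in-memory backend, nothing on disk to clean up
	defer db.Close()

	require.NoError(t, db.Set([]byte("key"), []byte("value")))
	require.Eventually(t, func() bool {
		ok, err := db.Has([]byte("key"))
		return err == nil && ok
	}, time.Second, 10*time.Millisecond)
}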


internal/p2p/pex/reactor_test.go (+35 -35)

@@ -14,7 +14,7 @@ import (
  "github.com/tendermint/tendermint/internal/p2p/p2ptest"
  "github.com/tendermint/tendermint/internal/p2p/pex"
  "github.com/tendermint/tendermint/libs/log"
- proto "github.com/tendermint/tendermint/proto/tendermint/p2p"
+ p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
  "github.com/tendermint/tendermint/types"
  )
@@ -45,7 +45,7 @@ func TestReactorBasic(t *testing.T) {
  // assert that when a mock node sends a request it receives a response (and
  // the correct one)
  testNet.sendRequest(t, firstNode, secondNode, true)
- testNet.listenForResponse(t, secondNode, firstNode, shortWait, []proto.PexAddressV2(nil))
+ testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil))
  }
  func TestReactorConnectFullNetwork(t *testing.T) {
@@ -71,17 +71,17 @@ func TestReactorSendsRequestsTooOften(t *testing.T) {
  r.pexInCh <- p2p.Envelope{
  From: badNode,
- Message: &proto.PexRequestV2{},
+ Message: &p2pproto.PexRequestV2{},
  }
  resp := <-r.pexOutCh
- msg, ok := resp.Message.(*proto.PexResponseV2)
+ msg, ok := resp.Message.(*p2pproto.PexResponseV2)
  require.True(t, ok)
  require.Empty(t, msg.Addresses)
  r.pexInCh <- p2p.Envelope{
  From: badNode,
- Message: &proto.PexRequestV2{},
+ Message: &p2pproto.PexRequestV2{},
  }
  peerErr := <-r.pexErrCh
@@ -136,10 +136,10 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
  require.NoError(t, err)
  require.True(t, added)
- addresses := make([]proto.PexAddressV2, 101)
+ addresses := make([]p2pproto.PexAddressV2, 101)
  for i := 0; i < len(addresses); i++ {
  nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
- addresses[i] = proto.PexAddressV2{
+ addresses[i] = p2pproto.PexAddressV2{
  URL: nodeAddress.String(),
  }
  }
@@ -152,12 +152,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
  select {
  // wait for a request and then send a response with too many addresses
  case req := <-r.pexOutCh:
- if _, ok := req.Message.(*proto.PexRequestV2); !ok {
+ if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok {
  t.Fatal("expected v2 pex request")
  }
  r.pexInCh <- p2p.Envelope{
  From: peer.NodeID,
- Message: &proto.PexResponseV2{
+ Message: &p2pproto.PexResponseV2{
  Addresses: addresses,
  },
  }
@@ -290,7 +290,7 @@ func setupSingle(t *testing.T) *singleTestReactor {
  pexErrCh := make(chan p2p.PeerError, chBuf)
  pexCh := p2p.NewChannel(
  p2p.ChannelID(pex.PexChannel),
- new(proto.PexMessage),
+ new(p2pproto.PexMessage),
  pexInCh,
  pexOutCh,
  pexErrCh,
@@ -381,7 +381,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
  // NOTE: we don't assert that the channels get drained after stopping the
  // reactor
  rts.pexChannels = rts.network.MakeChannelsNoCleanup(
- t, pex.ChannelDescriptor(), new(proto.PexMessage), chBuf,
+ t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), chBuf,
  )
  idx := 0
@@ -447,7 +447,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
  r.network.Nodes[node.NodeID] = node
  nodeID := node.NodeID
  r.pexChannels[nodeID] = node.MakeChannelNoCleanup(
- t, pex.ChannelDescriptor(), new(proto.PexMessage), r.opts.BufferSize,
+ t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), r.opts.BufferSize,
  )
  r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
  r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
@@ -488,11 +488,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int,
  r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
  to, from := r.checkNodePair(t, toNode, fromNode)
  conditional := func(msg p2p.Envelope) bool {
- _, ok := msg.Message.(*proto.PexRequestV2)
+ _, ok := msg.Message.(*p2pproto.PexRequestV2)
  return ok && msg.From == from
  }
  assertion := func(t *testing.T, msg p2p.Envelope) bool {
- require.Equal(t, &proto.PexRequestV2{}, msg.Message)
+ require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message)
  return true
  }
  r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -507,11 +507,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses(
  r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
  to, from := r.checkNodePair(t, toNode, fromNode)
  conditional := func(msg p2p.Envelope) bool {
- _, ok := msg.Message.(*proto.PexResponseV2)
+ _, ok := msg.Message.(*p2pproto.PexResponseV2)
  return ok && msg.From == from
  }
  assertion := func(t *testing.T, msg p2p.Envelope) bool {
- m, ok := msg.Message.(*proto.PexResponseV2)
+ m, ok := msg.Message.(*p2pproto.PexResponseV2)
  if !ok {
  require.Fail(t, "expected pex response v2")
  return true
@@ -534,17 +534,17 @@ func (r *reactorTestSuite) listenForResponse(
  t *testing.T,
  fromNode, toNode int,
  waitPeriod time.Duration,
- addresses []proto.PexAddressV2,
+ addresses []p2pproto.PexAddressV2,
  ) {
  r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
  to, from := r.checkNodePair(t, toNode, fromNode)
  conditional := func(msg p2p.Envelope) bool {
- _, ok := msg.Message.(*proto.PexResponseV2)
+ _, ok := msg.Message.(*p2pproto.PexResponseV2)
  r.logger.Info("message", msg, "ok", ok)
  return ok && msg.From == from
  }
  assertion := func(t *testing.T, msg p2p.Envelope) bool {
- require.Equal(t, &proto.PexResponseV2{Addresses: addresses}, msg.Message)
+ require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message)
  return true
  }
  r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -554,16 +554,16 @@ func (r *reactorTestSuite) listenForLegacyResponse(
  t *testing.T,
  fromNode, toNode int,
  waitPeriod time.Duration,
- addresses []proto.PexAddress,
+ addresses []p2pproto.PexAddress,
  ) {
  r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
  to, from := r.checkNodePair(t, toNode, fromNode)
  conditional := func(msg p2p.Envelope) bool {
- _, ok := msg.Message.(*proto.PexResponse)
+ _, ok := msg.Message.(*p2pproto.PexResponse)
  return ok && msg.From == from
  }
  assertion := func(t *testing.T, msg p2p.Envelope) bool {
- require.Equal(t, &proto.PexResponse{Addresses: addresses}, msg.Message)
+ require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
  return true
  }
  r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -595,26 +595,26 @@ func (r *reactorTestSuite) listenForPeerUpdate(
  }
  }
- func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []proto.PexAddressV2 {
- addresses := make([]proto.PexAddressV2, len(nodes))
+ func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 {
+ addresses := make([]p2pproto.PexAddressV2, len(nodes))
  for idx, node := range nodes {
  nodeID := r.nodes[node]
- addresses[idx] = proto.PexAddressV2{
+ addresses[idx] = p2pproto.PexAddressV2{
  URL: r.network.Nodes[nodeID].NodeAddress.String(),
  }
  }
  return addresses
  }
- func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []proto.PexAddress {
- addresses := make([]proto.PexAddress, len(nodes))
+ func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress {
+ addresses := make([]p2pproto.PexAddress, len(nodes))
  for idx, node := range nodes {
  nodeID := r.nodes[node]
  nodeAddrs := r.network.Nodes[nodeID].NodeAddress
  endpoints, err := nodeAddrs.Resolve(context.Background())
  require.NoError(t, err)
  require.Len(t, endpoints, 1)
- addresses[idx] = proto.PexAddress{
+ addresses[idx] = p2pproto.PexAddress{
  ID: string(nodeAddrs.NodeID),
  IP: endpoints[0].IP.String(),
  Port: uint32(endpoints[0].Port),
@@ -628,12 +628,12 @@ func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bo
  if v2 {
  r.pexChannels[from].Out <- p2p.Envelope{
  To: to,
- Message: &proto.PexRequestV2{},
+ Message: &p2pproto.PexRequestV2{},
  }
  } else {
  r.pexChannels[from].Out <- p2p.Envelope{
  To: to,
- Message: &proto.PexRequest{},
+ Message: &p2pproto.PexRequest{},
  }
  }
  }
@@ -649,7 +649,7 @@ func (r *reactorTestSuite) sendResponse(
  addrs := r.getV2AddressesFor(withNodes)
  r.pexChannels[from].Out <- p2p.Envelope{
  To: to,
- Message: &proto.PexResponseV2{
+ Message: &p2pproto.PexResponseV2{
  Addresses: addrs,
  },
  }
@@ -657,7 +657,7 @@ func (r *reactorTestSuite) sendResponse(
  addrs := r.getAddressesFor(t, withNodes)
  r.pexChannels[from].Out <- p2p.Envelope{
  To: to,
- Message: &proto.PexResponse{
+ Message: &p2pproto.PexResponse{
  Addresses: addrs,
  },
  }
@@ -764,8 +764,8 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int
  }
  // nolint: unused
- func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto.PexAddress {
- var addresses []proto.PexAddress
+ func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress {
+ var addresses []p2pproto.PexAddress
  for _, i := range nodeIndices {
  if i < len(r.nodes) {
  require.Fail(t, "index for pex address is greater than number of nodes")
@@ -777,7 +777,7 @@ func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto
  require.NoError(t, err)
  for _, endpoint := range endpoints {
  if endpoint.IP != nil {
- addresses = append(addresses, proto.PexAddress{
+ addresses = append(addresses, p2pproto.PexAddress{
  ID: string(nodeAddrs.NodeID),
  IP: endpoint.IP.String(),
  Port: uint32(endpoint.Port),
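Note: in this file every call site switches from the terse proto alias to p2pproto, which says which of the repository's proto-generated packages is meant. A minimal standalone sketch of the underlying rule (when two imports would share a base name, give each a descriptive alias), using standard-library stand-ins rather than Tendermint packages:

package main

import (
	crand "crypto/rand" // cryptographically secure randomness
	"fmt"
	mrand "math/rand" // deterministic, seedable randomness
)

func main() {
	// Descriptive aliases make each call site self-explanatory, the same way
	// p2pproto.PexRequestV2 reads more clearly than a bare proto.PexRequestV2.
	buf := make([]byte, 4)
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("crypto/rand bytes: %x\n", buf)

	r := mrand.New(mrand.NewSource(42))
	fmt.Println("math/rand value:", r.Intn(100))
}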


internal/rpc/core/abci.go (+5 -5)

@@ -4,7 +4,7 @@ import (
  abci "github.com/tendermint/tendermint/abci/types"
  "github.com/tendermint/tendermint/internal/proxy"
  "github.com/tendermint/tendermint/libs/bytes"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  )
@@ -16,7 +16,7 @@ func (env *Environment) ABCIQuery(
  data bytes.HexBytes,
  height int64,
  prove bool,
- ) (*ctypes.ResultABCIQuery, error) {
+ ) (*coretypes.ResultABCIQuery, error) {
  resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{
  Path: path,
  Data: data,
@@ -27,16 +27,16 @@ func (env *Environment) ABCIQuery(
  return nil, err
  }
- return &ctypes.ResultABCIQuery{Response: *resQuery}, nil
+ return &coretypes.ResultABCIQuery{Response: *resQuery}, nil
  }
  // ABCIInfo gets some info about the application.
  // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info
- func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
+ func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) {
  resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo)
  if err != nil {
  return nil, err
  }
- return &ctypes.ResultABCIInfo{Response: *resInfo}, nil
+ return &coretypes.ResultABCIInfo{Response: *resInfo}, nil
  }

internal/rpc/core/blocks.go (+21 -21)

@@ -8,7 +8,7 @@ import (
  "github.com/tendermint/tendermint/libs/bytes"
  tmmath "github.com/tendermint/tendermint/libs/math"
  tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  "github.com/tendermint/tendermint/types"
  )
@@ -25,7 +25,7 @@ import (
  // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain
  func (env *Environment) BlockchainInfo(
  ctx *rpctypes.Context,
- minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+ minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) {
  const limit int64 = 20
@@ -49,7 +49,7 @@ func (env *Environment) BlockchainInfo(
  }
  }
- return &ctypes.ResultBlockchainInfo{
+ return &coretypes.ResultBlockchainInfo{
  LastHeight: env.BlockStore.Height(),
  BlockMetas: blockMetas}, nil
  }
@@ -60,7 +60,7 @@ func (env *Environment) BlockchainInfo(
  func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
  // filter negatives
  if min < 0 || max < 0 {
- return min, max, ctypes.ErrZeroOrNegativeHeight
+ return min, max, coretypes.ErrZeroOrNegativeHeight
  }
  // adjust for default values
@@ -83,7 +83,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
  if min > max {
  return min, max, fmt.Errorf("%w: min height %d can't be greater than max height %d",
- ctypes.ErrInvalidRequest, min, max)
+ coretypes.ErrInvalidRequest, min, max)
  }
  return min, max, nil
  }
@@ -91,7 +91,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
  // Block gets block at a given height.
  // If no height is provided, it will fetch the latest block.
  // More: https://docs.tendermint.com/master/rpc/#/Info/block
- func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) {
+ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) {
  height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
  if err != nil {
  return nil, err
@@ -99,33 +99,33 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.
  blockMeta := env.BlockStore.LoadBlockMeta(height)
  if blockMeta == nil {
- return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
+ return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
  }
  block := env.BlockStore.LoadBlock(height)
- return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil
+ return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil
  }
  // BlockByHash gets block by hash.
  // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash
- func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
+ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
  // N.B. The hash parameter is HexBytes so that the reflective parameter
  // decoding logic in the HTTP service will correctly translate from JSON.
  // See https://github.com/tendermint/tendermint/issues/6802 for context.
  block := env.BlockStore.LoadBlockByHash(hash)
  if block == nil {
- return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
+ return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
  }
  // If block is not nil, then blockMeta can't be nil.
  blockMeta := env.BlockStore.LoadBlockMeta(block.Height)
- return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil
+ return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil
  }
  // Commit gets block commit at a given height.
  // If no height is provided, it will fetch the commit for the latest block.
  // More: https://docs.tendermint.com/master/rpc/#/Info/commit
- func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) {
+ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) {
  height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
  if err != nil {
  return nil, err
@@ -144,7 +144,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
  // NOTE: we can't yet ensure atomicity of operations in asserting
  // whether this is the latest height and retrieving the seen commit
  if commit != nil && commit.Height == height {
- return ctypes.NewResultCommit(&header, commit, false), nil
+ return coretypes.NewResultCommit(&header, commit, false), nil
  }
  }
@@ -153,7 +153,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
  if commit == nil {
  return nil, nil
  }
- return ctypes.NewResultCommit(&header, commit, true), nil
+ return coretypes.NewResultCommit(&header, commit, true), nil
  }
  // BlockResults gets ABCIResults at a given height.
@@ -163,7 +163,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
  // Thus response.results.deliver_tx[5] is the results of executing
  // getBlock(h).Txs[5]
  // More: https://docs.tendermint.com/master/rpc/#/Info/block_results
- func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
+ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) {
  height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
  if err != nil {
  return nil, err
@@ -179,7 +179,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*
  totalGasUsed += tx.GetGasUsed()
  }
- return &ctypes.ResultBlockResults{
+ return &coretypes.ResultBlockResults{
  Height: height,
  TxsResults: results.DeliverTxs,
  TotalGasUsed: totalGasUsed,
@@ -197,7 +197,7 @@ func (env *Environment) BlockSearch(
  query string,
  pagePtr, perPagePtr *int,
  orderBy string,
- ) (*ctypes.ResultBlockSearch, error) {
+ ) (*coretypes.ResultBlockSearch, error) {
  if !indexer.KVSinkEnabled(env.EventSinks) {
  return nil, fmt.Errorf("block searching is disabled due to no kvEventSink")
@@ -229,7 +229,7 @@ func (env *Environment) BlockSearch(
  sort.Slice(results, func(i, j int) bool { return results[i] < results[j] })
  default:
- return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest)
+ return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest)
  }
  // paginate results
@@ -244,13 +244,13 @@ func (env *Environment) BlockSearch(
  skipCount := validateSkipCount(page, perPage)
  pageSize := tmmath.MinInt(perPage, totalCount-skipCount)
- apiResults := make([]*ctypes.ResultBlock, 0, pageSize)
+ apiResults := make([]*coretypes.ResultBlock, 0, pageSize)
  for i := skipCount; i < skipCount+pageSize; i++ {
  block := env.BlockStore.LoadBlock(results[i])
  if block != nil {
  blockMeta := env.BlockStore.LoadBlockMeta(block.Height)
  if blockMeta != nil {
- apiResults = append(apiResults, &ctypes.ResultBlock{
+ apiResults = append(apiResults, &coretypes.ResultBlock{
  Block: block,
  BlockID: blockMeta.BlockID,
  })
@@ -258,5 +258,5 @@ func (env *Environment) BlockSearch(
  }
  }
- return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
+ return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
  }

internal/rpc/core/blocks_test.go (+3 -3)

@@ -12,7 +12,7 @@ import (
  abci "github.com/tendermint/tendermint/abci/types"
  sm "github.com/tendermint/tendermint/internal/state"
  tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  "github.com/tendermint/tendermint/types"
  )
@@ -89,12 +89,12 @@ func TestBlockResults(t *testing.T) {
  testCases := []struct {
  height int64
  wantErr bool
- wantRes *ctypes.ResultBlockResults
+ wantRes *coretypes.ResultBlockResults
  }{
  {-1, true, nil},
  {0, true, nil},
  {101, true, nil},
- {100, false, &ctypes.ResultBlockResults{
+ {100, false, &coretypes.ResultBlockResults{
  Height: 100,
  TxsResults: results.DeliverTxs,
  TotalGasUsed: 15,


internal/rpc/core/consensus.go (+16 -16)

@@ -3,9 +3,9 @@ package core
  import (
  "errors"
- cm "github.com/tendermint/tendermint/internal/consensus"
+ "github.com/tendermint/tendermint/internal/consensus"
  tmmath "github.com/tendermint/tendermint/libs/math"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  "github.com/tendermint/tendermint/types"
  )
@@ -20,7 +20,7 @@ import (
  func (env *Environment) Validators(
  ctx *rpctypes.Context,
  heightPtr *int64,
- pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) {
+ pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) {
  // The latest validator that we know is the NextValidator of the last block.
  height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr)
@@ -44,7 +44,7 @@ func (env *Environment) Validators(
  v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)]
- return &ctypes.ResultValidators{
+ return &coretypes.ResultValidators{
  BlockHeight: height,
  Validators: v,
  Count: len(v),
@@ -54,16 +54,16 @@ func (env *Environment) Validators(
  // DumpConsensusState dumps consensus state.
  // UNSTABLE
  // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state
- func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
+ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) {
  // Get Peer consensus states.
- var peerStates []ctypes.PeerStateInfo
+ var peerStates []coretypes.PeerStateInfo
  switch {
  case env.P2PPeers != nil:
  peers := env.P2PPeers.Peers().List()
- peerStates = make([]ctypes.PeerStateInfo, 0, len(peers))
+ peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
  for _, peer := range peers {
- peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState)
+ peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState)
  if !ok { // peer does not have a state yet
  continue
  }
@@ -71,7 +71,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
  if err != nil {
  return nil, err
  }
- peerStates = append(peerStates, ctypes.PeerStateInfo{
+ peerStates = append(peerStates, coretypes.PeerStateInfo{
  // Peer basic info.
  NodeAddress: peer.SocketAddr().String(),
  // Peer consensus state.
@@ -80,7 +80,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
  }
  case env.PeerManager != nil:
  peers := env.PeerManager.Peers()
- peerStates = make([]ctypes.PeerStateInfo, 0, len(peers))
+ peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
  for _, pid := range peers {
  peerState, ok := env.ConsensusReactor.GetPeerState(pid)
  if !ok {
@@ -94,7 +94,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
  addr := env.PeerManager.Addresses(pid)
  if len(addr) >= 1 {
- peerStates = append(peerStates, ctypes.PeerStateInfo{
+ peerStates = append(peerStates, coretypes.PeerStateInfo{
  // Peer basic info.
  NodeAddress: addr[0].String(),
  // Peer consensus state.
@@ -111,7 +111,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
  if err != nil {
  return nil, err
  }
- return &ctypes.ResultDumpConsensusState{
+ return &coretypes.ResultDumpConsensusState{
  RoundState: roundState,
  Peers: peerStates}, nil
  }
@@ -119,10 +119,10 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
  // ConsensusState returns a concise summary of the consensus state.
  // UNSTABLE
  // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state
- func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
+ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) {
  // Get self round state.
  bz, err := env.ConsensusState.GetRoundStateSimpleJSON()
- return &ctypes.ResultConsensusState{RoundState: bz}, err
+ return &coretypes.ResultConsensusState{RoundState: bz}, err
  }
  // ConsensusParams gets the consensus parameters at the given block height.
@@ -130,7 +130,7 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result
  // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params
  func (env *Environment) ConsensusParams(
  ctx *rpctypes.Context,
- heightPtr *int64) (*ctypes.ResultConsensusParams, error) {
+ heightPtr *int64) (*coretypes.ResultConsensusParams, error) {
  // The latest consensus params that we know is the consensus params after the
  // last block.
@@ -144,7 +144,7 @@ func (env *Environment) ConsensusParams(
  return nil, err
  }
- return &ctypes.ResultConsensusParams{
+ return &coretypes.ResultConsensusParams{
  BlockHeight: height,
  ConsensusParams: consensusParams}, nil
  }

internal/rpc/core/dev.go (+3 -3)

@@ -1,12 +1,12 @@
  package core
  import (
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  )
  // UnsafeFlushMempool removes all transactions from the mempool.
- func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) {
+ func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) {
  env.Mempool.Flush()
- return &ctypes.ResultUnsafeFlushMempool{}, nil
+ return &coretypes.ResultUnsafeFlushMempool{}, nil
  }

internal/rpc/core/env.go (+10 -10)

@@ -5,10 +5,10 @@ import (
  "fmt"
  "time"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
  "github.com/tendermint/tendermint/crypto"
  "github.com/tendermint/tendermint/internal/consensus"
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/internal/mempool"
  "github.com/tendermint/tendermint/internal/p2p"
  "github.com/tendermint/tendermint/internal/proxy"
  sm "github.com/tendermint/tendermint/internal/state"
@@ -16,7 +16,7 @@ import (
  "github.com/tendermint/tendermint/internal/statesync"
  tmjson "github.com/tendermint/tendermint/libs/json"
  "github.com/tendermint/tendermint/libs/log"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  "github.com/tendermint/tendermint/types"
  )
@@ -96,13 +96,13 @@ type Environment struct {
  GenDoc *types.GenesisDoc // cache the genesis structure
  EventSinks []indexer.EventSink
  EventBus *types.EventBus // thread safe
- Mempool mempl.Mempool
+ Mempool mempool.Mempool
  BlockSyncReactor consensus.BlockSyncReactor
  StateSyncMetricer statesync.Metricer
  Logger log.Logger
- Config cfg.RPCConfig
+ Config config.RPCConfig
  // cache of chunked genesis data.
  genChunks []string
@@ -113,7 +113,7 @@ type Environment struct {
  func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
  // this can only happen if we haven't first run validatePerPage
  if perPage < 1 {
- panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage))
+ panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage))
  }
  if pagePtr == nil { // no page parameter
@@ -126,7 +126,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
  }
  page := *pagePtr
  if page <= 0 || page > pages {
- return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page)
+ return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page)
  }
  return page, nil
@@ -191,15 +191,15 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64,
  if heightPtr != nil {
  height := *heightPtr
  if height <= 0 {
- return 0, fmt.Errorf("%w (requested height: %d)", ctypes.ErrZeroOrNegativeHeight, height)
+ return 0, fmt.Errorf("%w (requested height: %d)", coretypes.ErrZeroOrNegativeHeight, height)
  }
  if height > latestHeight {
  return 0, fmt.Errorf("%w (requested height: %d, blockchain height: %d)",
- ctypes.ErrHeightExceedsChainHead, height, latestHeight)
+ coretypes.ErrHeightExceedsChainHead, height, latestHeight)
  }
  base := env.BlockStore.Base()
  if height < base {
- return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, height, base)
+ return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, height, base)
  }
  return height, nil
  }


internal/rpc/core/events.go (+8 -8)

@@ -7,7 +7,7 @@ import (
  tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
  tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
  rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
  )
@@ -18,7 +18,7 @@ const (
  // Subscribe for events via WebSocket.
  // More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe
- func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
+ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) {
  addr := ctx.RemoteAddr()
  if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@@ -49,7 +49,7 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.
  select {
  case msg := <-sub.Out():
  var (
- resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()}
+ resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()}
  resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent)
  )
  writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -80,12 +80,12 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.
  }
  }()
- return &ctypes.ResultSubscribe{}, nil
+ return &coretypes.ResultSubscribe{}, nil
  }
  // Unsubscribe from events via WebSocket.
  // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe
- func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
+ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) {
  args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()}
  env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query)
@@ -100,17 +100,17 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype
  if err != nil {
  return nil, err
  }
- return &ctypes.ResultUnsubscribe{}, nil
+ return &coretypes.ResultUnsubscribe{}, nil
  }
  // UnsubscribeAll from all events via WebSocket.
  // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all
- func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
+ func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) {
  addr := ctx.RemoteAddr()
  env.Logger.Info("Unsubscribe from all", "remote", addr)
  err := env.EventBus.UnsubscribeAll(ctx.Context(), addr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &ctypes.ResultUnsubscribe{}, nil
return &coretypes.ResultUnsubscribe{}, nil
} }

+ 4
- 4
internal/rpc/core/evidence.go

@ -3,7 +3,7 @@ package core
import ( import (
"fmt" "fmt"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -12,10 +12,10 @@ import (
// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence
func (env *Environment) BroadcastEvidence( func (env *Environment) BroadcastEvidence(
ctx *rpctypes.Context, ctx *rpctypes.Context,
ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
if ev == nil { if ev == nil {
return nil, fmt.Errorf("%w: no evidence was provided", ctypes.ErrInvalidRequest)
return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest)
} }
if err := ev.ValidateBasic(); err != nil { if err := ev.ValidateBasic(); err != nil {
@ -25,5 +25,5 @@ func (env *Environment) BroadcastEvidence(
if err := env.EvidencePool.AddEvidence(ev); err != nil { if err := env.EvidencePool.AddEvidence(ev); err != nil {
return nil, fmt.Errorf("failed to add evidence: %w", err) return nil, fmt.Errorf("failed to add evidence: %w", err)
} }
return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
} }

+ 3
- 3
internal/rpc/core/health.go

@ -1,13 +1,13 @@
package core package core
import ( import (
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
) )
// Health gets node health. Returns empty result (200 OK) on success, no // Health gets node health. Returns empty result (200 OK) on success, no
// response - in case of an error. // response - in case of an error.
// More: https://docs.tendermint.com/master/rpc/#/Info/health // More: https://docs.tendermint.com/master/rpc/#/Info/health
func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
return &ctypes.ResultHealth{}, nil
func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) {
return &coretypes.ResultHealth{}, nil
} }

+ 20
- 20
internal/rpc/core/mempool.go

@ -7,9 +7,9 @@ import (
"time" "time"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/mempool"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -20,25 +20,25 @@ import (
// BroadcastTxAsync returns right away, with no response. Does not wait for // BroadcastTxAsync returns right away, with no response. Does not wait for
// CheckTx nor DeliverTx results. // CheckTx nor DeliverTx results.
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async
func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempl.TxInfo{})
func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
return &coretypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
} }
// BroadcastTxSync returns with the response from CheckTx. Does not wait for // BroadcastTxSync returns with the response from CheckTx. Does not wait for
// DeliverTx result. // DeliverTx result.
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync
func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
resCh := make(chan *abci.Response, 1) resCh := make(chan *abci.Response, 1)
err := env.Mempool.CheckTx( err := env.Mempool.CheckTx(
ctx.Context(), ctx.Context(),
tx, tx,
func(res *abci.Response) { resCh <- res }, func(res *abci.Response) { resCh <- res },
mempl.TxInfo{},
mempool.TxInfo{},
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -47,7 +47,7 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct
res := <-resCh res := <-resCh
r := res.GetCheckTx() r := res.GetCheckTx()
return &ctypes.ResultBroadcastTx{
return &coretypes.ResultBroadcastTx{
Code: r.Code, Code: r.Code,
Data: r.Data, Data: r.Data,
Log: r.Log, Log: r.Log,
@ -59,7 +59,7 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct
// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx.
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit
func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll
subscriber := ctx.RemoteAddr() subscriber := ctx.RemoteAddr()
if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@ -91,7 +91,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
ctx.Context(), ctx.Context(),
tx, tx,
func(res *abci.Response) { checkTxResCh <- res }, func(res *abci.Response) { checkTxResCh <- res },
mempl.TxInfo{},
mempool.TxInfo{},
) )
if err != nil { if err != nil {
env.Logger.Error("Error on broadcastTxCommit", "err", err) env.Logger.Error("Error on broadcastTxCommit", "err", err)
@ -102,7 +102,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
checkTxRes := checkTxResMsg.GetCheckTx() checkTxRes := checkTxResMsg.GetCheckTx()
if checkTxRes.Code != abci.CodeTypeOK { if checkTxRes.Code != abci.CodeTypeOK {
return &ctypes.ResultBroadcastTxCommit{
return &coretypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes, CheckTx: *checkTxRes,
DeliverTx: abci.ResponseDeliverTx{}, DeliverTx: abci.ResponseDeliverTx{},
Hash: tx.Hash(), Hash: tx.Hash(),
@ -113,7 +113,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
select { select {
case msg := <-deliverTxSub.Out(): // The tx was included in a block. case msg := <-deliverTxSub.Out(): // The tx was included in a block.
deliverTxRes := msg.Data().(types.EventDataTx) deliverTxRes := msg.Data().(types.EventDataTx)
return &ctypes.ResultBroadcastTxCommit{
return &coretypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes, CheckTx: *checkTxRes,
DeliverTx: deliverTxRes.Result, DeliverTx: deliverTxRes.Result,
Hash: tx.Hash(), Hash: tx.Hash(),
@ -128,7 +128,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
} }
err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason) err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason)
env.Logger.Error("Error on broadcastTxCommit", "err", err) env.Logger.Error("Error on broadcastTxCommit", "err", err)
return &ctypes.ResultBroadcastTxCommit{
return &coretypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes, CheckTx: *checkTxRes,
DeliverTx: abci.ResponseDeliverTx{}, DeliverTx: abci.ResponseDeliverTx{},
Hash: tx.Hash(), Hash: tx.Hash(),
@ -136,7 +136,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
case <-time.After(env.Config.TimeoutBroadcastTxCommit): case <-time.After(env.Config.TimeoutBroadcastTxCommit):
err = errors.New("timed out waiting for tx to be included in a block") err = errors.New("timed out waiting for tx to be included in a block")
env.Logger.Error("Error on broadcastTxCommit", "err", err) env.Logger.Error("Error on broadcastTxCommit", "err", err)
return &ctypes.ResultBroadcastTxCommit{
return &coretypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes, CheckTx: *checkTxRes,
DeliverTx: abci.ResponseDeliverTx{}, DeliverTx: abci.ResponseDeliverTx{},
Hash: tx.Hash(), Hash: tx.Hash(),
@ -147,12 +147,12 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries)
// including their number. // including their number.
// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs
func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) {
func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) {
// reuse per_page validator // reuse per_page validator
limit := env.validatePerPage(limitPtr) limit := env.validatePerPage(limitPtr)
txs := env.Mempool.ReapMaxTxs(limit) txs := env.Mempool.ReapMaxTxs(limit)
return &ctypes.ResultUnconfirmedTxs{
return &coretypes.ResultUnconfirmedTxs{
Count: len(txs), Count: len(txs),
Total: env.Mempool.Size(), Total: env.Mempool.Size(),
TotalBytes: env.Mempool.SizeBytes(), TotalBytes: env.Mempool.SizeBytes(),
@ -161,8 +161,8 @@ func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*c
// NumUnconfirmedTxs gets number of unconfirmed transactions. // NumUnconfirmedTxs gets number of unconfirmed transactions.
// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs // More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs
func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
return &ctypes.ResultUnconfirmedTxs{
func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) {
return &coretypes.ResultUnconfirmedTxs{
Count: env.Mempool.Size(), Count: env.Mempool.Size(),
Total: env.Mempool.Size(), Total: env.Mempool.Size(),
TotalBytes: env.Mempool.SizeBytes()}, nil TotalBytes: env.Mempool.SizeBytes()}, nil
@ -171,10 +171,10 @@ func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.Result
// CheckTx checks the transaction without executing it. The transaction won't // CheckTx checks the transaction without executing it. The transaction won't
// be added to the mempool either. // be added to the mempool either.
// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx // More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx
func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx})
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil
return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil
} }
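BroadcastTxSync and BroadcastTxCommit above bridge the callback-style CheckTx API to a blocking call through a buffered channel. The following is a self-contained sketch of that bridge, using hypothetical stand-in types rather than the ABCI ones.

    package main

    import "fmt"

    type result struct{ code uint32 }

    // checkTx stands in for a CheckTx-style API that reports its result through
    // a callback rather than a return value.
    func checkTx(tx []byte, cb func(result)) error {
        go cb(result{code: 0}) // deliver the result asynchronously
        return nil
    }

    func broadcastSync(tx []byte) (result, error) {
        resCh := make(chan result, 1) // buffered so the callback never blocks
        if err := checkTx(tx, func(r result) { resCh <- r }); err != nil {
            return result{}, err
        }
        return <-resCh, nil // block until the callback fires
    }

    func main() {
        r, err := broadcastSync([]byte("tx"))
        fmt.Println(r.code, err)
    }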

+ 23
- 23
internal/rpc/core/net.go

@ -6,21 +6,21 @@ import (
"strings" "strings"
"github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
) )
// NetInfo returns network info. // NetInfo returns network info.
// More: https://docs.tendermint.com/master/rpc/#/Info/net_info // More: https://docs.tendermint.com/master/rpc/#/Info/net_info
func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
var peers []ctypes.Peer
func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
var peers []coretypes.Peer
switch { switch {
case env.P2PPeers != nil: case env.P2PPeers != nil:
peersList := env.P2PPeers.Peers().List() peersList := env.P2PPeers.Peers().List()
peers = make([]ctypes.Peer, 0, len(peersList))
peers = make([]coretypes.Peer, 0, len(peersList))
for _, peer := range peersList { for _, peer := range peersList {
peers = append(peers, ctypes.Peer{
peers = append(peers, coretypes.Peer{
ID: peer.ID(), ID: peer.ID(),
URL: peer.SocketAddr().String(), URL: peer.SocketAddr().String(),
}) })
@ -33,7 +33,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
continue continue
} }
peers = append(peers, ctypes.Peer{
peers = append(peers, coretypes.Peer{
ID: peer, ID: peer,
URL: addrs[0].String(), URL: addrs[0].String(),
}) })
@ -42,7 +42,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
return nil, errors.New("peer management system does not support NetInfo responses") return nil, errors.New("peer management system does not support NetInfo responses")
} }
return &ctypes.ResultNetInfo{
return &coretypes.ResultNetInfo{
Listening: env.P2PTransport.IsListening(), Listening: env.P2PTransport.IsListening(),
Listeners: env.P2PTransport.Listeners(), Listeners: env.P2PTransport.Listeners(),
NPeers: len(peers), NPeers: len(peers),
@ -51,19 +51,19 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
} }
// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT).
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
if env.P2PPeers == nil { if env.P2PPeers == nil {
return nil, errors.New("peer management system does not support this operation") return nil, errors.New("peer management system does not support this operation")
} }
if len(seeds) == 0 { if len(seeds) == 0 {
return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest)
return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest)
} }
env.Logger.Info("DialSeeds", "seeds", seeds) env.Logger.Info("DialSeeds", "seeds", seeds)
if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { if err := env.P2PPeers.DialPeersAsync(seeds); err != nil {
return &ctypes.ResultDialSeeds{}, err
return &coretypes.ResultDialSeeds{}, err
} }
return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
} }
// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT),
@ -71,19 +71,19 @@ func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (
func (env *Environment) UnsafeDialPeers( func (env *Environment) UnsafeDialPeers(
ctx *rpctypes.Context, ctx *rpctypes.Context,
peers []string, peers []string,
persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) {
persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) {
if env.P2PPeers == nil { if env.P2PPeers == nil {
return nil, errors.New("peer management system does not support this operation") return nil, errors.New("peer management system does not support this operation")
} }
if len(peers) == 0 { if len(peers) == 0 {
return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest)
return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest)
} }
ids, err := getIDs(peers) ids, err := getIDs(peers)
if err != nil { if err != nil {
return &ctypes.ResultDialPeers{}, err
return &coretypes.ResultDialPeers{}, err
} }
env.Logger.Info("DialPeers", "peers", peers, "persistent", env.Logger.Info("DialPeers", "peers", peers, "persistent",
@ -91,40 +91,40 @@ func (env *Environment) UnsafeDialPeers(
if persistent { if persistent {
if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { if err := env.P2PPeers.AddPersistentPeers(peers); err != nil {
return &ctypes.ResultDialPeers{}, err
return &coretypes.ResultDialPeers{}, err
} }
} }
if private { if private {
if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil {
return &ctypes.ResultDialPeers{}, err
return &coretypes.ResultDialPeers{}, err
} }
} }
if unconditional { if unconditional {
if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil {
return &ctypes.ResultDialPeers{}, err
return &coretypes.ResultDialPeers{}, err
} }
} }
if err := env.P2PPeers.DialPeersAsync(peers); err != nil { if err := env.P2PPeers.DialPeersAsync(peers); err != nil {
return &ctypes.ResultDialPeers{}, err
return &coretypes.ResultDialPeers{}, err
} }
return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
} }
// Genesis returns genesis file. // Genesis returns genesis file.
// More: https://docs.tendermint.com/master/rpc/#/Info/genesis // More: https://docs.tendermint.com/master/rpc/#/Info/genesis
func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
if len(env.genChunks) > 1 { if len(env.genChunks) > 1 {
return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") return nil, errors.New("genesis response is large, please use the genesis_chunked API instead")
} }
return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil
return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil
} }
func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) {
if env.genChunks == nil { if env.genChunks == nil {
return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized")
} }
@ -139,7 +139,7 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctyp
return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id) return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id)
} }
return &ctypes.ResultGenesisChunk{
return &coretypes.ResultGenesisChunk{
TotalChunks: len(env.genChunks), TotalChunks: len(env.genChunks),
ChunkNumber: id, ChunkNumber: id,
Data: env.genChunks[id], Data: env.genChunks[id],


+ 3
- 3
internal/rpc/core/net_test.go

@ -6,14 +6,14 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/log"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
) )
func TestUnsafeDialSeeds(t *testing.T) { func TestUnsafeDialSeeds(t *testing.T) {
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
err := sw.Start() err := sw.Start()
require.NoError(t, err) require.NoError(t, err)
@ -48,7 +48,7 @@ func TestUnsafeDialSeeds(t *testing.T) {
} }
func TestUnsafeDialPeers(t *testing.T) { func TestUnsafeDialPeers(t *testing.T) {
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
sw.SetAddrBook(&p2p.AddrBookMock{ sw.SetAddrBook(&p2p.AddrBookMock{
Addrs: make(map[string]struct{}), Addrs: make(map[string]struct{}),


+ 6
- 6
internal/rpc/core/status.go

@ -5,7 +5,7 @@ import (
"time" "time"
tmbytes "github.com/tendermint/tendermint/libs/bytes" tmbytes "github.com/tendermint/tendermint/libs/bytes"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -13,7 +13,7 @@ import (
// Status returns Tendermint status including node info, pubkey, latest block // Status returns Tendermint status including node info, pubkey, latest block
// hash, app hash, block height, current max peer block height, and time. // hash, app hash, block height, current max peer block height, and time.
// More: https://docs.tendermint.com/master/rpc/#/Info/status // More: https://docs.tendermint.com/master/rpc/#/Info/status
func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) {
var ( var (
earliestBlockHeight int64 earliestBlockHeight int64
earliestBlockHash tmbytes.HexBytes earliestBlockHash tmbytes.HexBytes
@ -50,17 +50,17 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err
if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil { if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil {
votingPower = val.VotingPower votingPower = val.VotingPower
} }
validatorInfo := ctypes.ValidatorInfo{}
validatorInfo := coretypes.ValidatorInfo{}
if env.PubKey != nil { if env.PubKey != nil {
validatorInfo = ctypes.ValidatorInfo{
validatorInfo = coretypes.ValidatorInfo{
Address: env.PubKey.Address(), Address: env.PubKey.Address(),
PubKey: env.PubKey, PubKey: env.PubKey,
VotingPower: votingPower, VotingPower: votingPower,
} }
} }
result := &ctypes.ResultStatus{
result := &coretypes.ResultStatus{
NodeInfo: env.P2PTransport.NodeInfo(), NodeInfo: env.P2PTransport.NodeInfo(),
SyncInfo: ctypes.SyncInfo{
SyncInfo: coretypes.SyncInfo{
LatestBlockHash: latestBlockHash, LatestBlockHash: latestBlockHash,
LatestAppHash: latestAppHash, LatestAppHash: latestAppHash,
LatestBlockHeight: latestHeight, LatestBlockHeight: latestHeight,


+ 8
- 8
internal/rpc/core/tx.go

@ -9,7 +9,7 @@ import (
"github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/bytes"
tmmath "github.com/tendermint/tendermint/libs/math" tmmath "github.com/tendermint/tendermint/libs/math"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query" tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -18,7 +18,7 @@ import (
// transaction is in the mempool, invalidated, or was not sent in the first // transaction is in the mempool, invalidated, or was not sent in the first
// place. // place.
// More: https://docs.tendermint.com/master/rpc/#/Info/tx // More: https://docs.tendermint.com/master/rpc/#/Info/tx
func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
// if index is disabled, return error // if index is disabled, return error
// N.B. The hash parameter is HexBytes so that the reflective parameter // N.B. The hash parameter is HexBytes so that the reflective parameter
@ -45,7 +45,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo
proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines
} }
return &ctypes.ResultTx{
return &coretypes.ResultTx{
Hash: hash, Hash: hash,
Height: height, Height: height,
Index: index, Index: index,
@ -68,7 +68,7 @@ func (env *Environment) TxSearch(
prove bool, prove bool,
pagePtr, perPagePtr *int, pagePtr, perPagePtr *int,
orderBy string, orderBy string,
) (*ctypes.ResultTxSearch, error) {
) (*coretypes.ResultTxSearch, error) {
if !indexer.KVSinkEnabled(env.EventSinks) { if !indexer.KVSinkEnabled(env.EventSinks) {
return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink")
@ -103,7 +103,7 @@ func (env *Environment) TxSearch(
return results[i].Height < results[j].Height return results[i].Height < results[j].Height
}) })
default: default:
return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest)
return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest)
} }
// paginate results // paginate results
@ -118,7 +118,7 @@ func (env *Environment) TxSearch(
skipCount := validateSkipCount(page, perPage) skipCount := validateSkipCount(page, perPage)
pageSize := tmmath.MinInt(perPage, totalCount-skipCount) pageSize := tmmath.MinInt(perPage, totalCount-skipCount)
apiResults := make([]*ctypes.ResultTx, 0, pageSize)
apiResults := make([]*coretypes.ResultTx, 0, pageSize)
for i := skipCount; i < skipCount+pageSize; i++ { for i := skipCount; i < skipCount+pageSize; i++ {
r := results[i] r := results[i]
@ -128,7 +128,7 @@ func (env *Environment) TxSearch(
proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines
} }
apiResults = append(apiResults, &ctypes.ResultTx{
apiResults = append(apiResults, &coretypes.ResultTx{
Hash: types.Tx(r.Tx).Hash(), Hash: types.Tx(r.Tx).Hash(),
Height: r.Height, Height: r.Height,
Index: r.Index, Index: r.Index,
@ -138,7 +138,7 @@ func (env *Environment) TxSearch(
}) })
} }
return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil
return &coretypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil
} }
} }
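TxSearch above pages its results with a skip count and a clamped page size before copying the window into apiResults. Here is a tiny sketch of that arithmetic; validateSkipCount and tmmath.MinInt are reimplemented locally, and the (page-1)*perPage definition of the skip is an assumption of the sketch, not a quote of the real helper.

    package main

    import "fmt"

    func minInt(a, b int) int {
        if a < b {
            return a
        }
        return b
    }

    // pageWindow returns how many results precede the requested page and how
    // many belong on it, clamping the final page so it cannot run past
    // totalCount.
    func pageWindow(page, perPage, totalCount int) (skip, size int) {
        skip = (page - 1) * perPage
        size = minInt(perPage, totalCount-skip)
        return skip, size
    }

    func main() {
        skip, size := pageWindow(3, 30, 75) // third page of 75 results
        fmt.Println(skip, size)             // 60 15
    }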


+ 6
- 6
internal/state/execution.go

@ -7,9 +7,9 @@ import (
"time" "time"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/libs/fail" "github.com/tendermint/tendermint/internal/libs/fail"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/proxy"
"github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/log"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
@ -37,7 +37,7 @@ type BlockExecutor struct {
// manage the mempool lock during commit // manage the mempool lock during commit
// and update both with block results after commit. // and update both with block results after commit.
mempool mempl.Mempool
mempool mempool.Mempool
evpool EvidencePool evpool EvidencePool
logger log.Logger logger log.Logger
@ -61,7 +61,7 @@ func NewBlockExecutor(
stateStore Store, stateStore Store,
logger log.Logger, logger log.Logger,
proxyApp proxy.AppConnConsensus, proxyApp proxy.AppConnConsensus,
mempool mempl.Mempool,
pool mempool.Mempool,
evpool EvidencePool, evpool EvidencePool,
blockStore BlockStore, blockStore BlockStore,
options ...BlockExecutorOption, options ...BlockExecutorOption,
@ -70,7 +70,7 @@ func NewBlockExecutor(
store: stateStore, store: stateStore,
proxyApp: proxyApp, proxyApp: proxyApp,
eventBus: types.NopEventBus{}, eventBus: types.NopEventBus{},
mempool: mempool,
mempool: pool,
evpool: evpool, evpool: evpool,
logger: logger, logger: logger,
metrics: NopMetrics(), metrics: NopMetrics(),
@ -424,7 +424,7 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
} }
// Check if validator's pubkey matches an ABCI type in the consensus params // Check if validator's pubkey matches an ABCI type in the consensus params
pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey)
pk, err := encoding.PubKeyFromProto(valUpdate.PubKey)
if err != nil { if err != nil {
return err return err
} }
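With the mempl alias gone in this file, a constructor parameter named mempool would shadow the package inside NewBlockExecutor, which is why the hunk renames it to pool. A minimal sketch of the collision, using a hypothetical module path in place of internal/mempool:

    package executor

    import "example.com/node/mempool" // hypothetical path standing in for internal/mempool

    type BlockExecutor struct {
        mempool mempool.Mempool // a field may share the package's name; b.mempool stays unambiguous
    }

    // If this parameter were named "mempool", it would shadow the package for
    // the whole function body and any package-qualified name such as
    // mempool.TxInfo{} would stop compiling, so the hunk above uses pool.
    func NewBlockExecutor(pool mempool.Mempool) *BlockExecutor {
        return &BlockExecutor{mempool: pool}
    }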


+ 8
- 8
internal/state/execution_test.go

@ -8,12 +8,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client" abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/ed25519"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/crypto/tmhash"
mmock "github.com/tendermint/tendermint/internal/mempool/mock" mmock "github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/proxy"
@ -25,7 +26,6 @@ import (
"github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version" "github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-db"
) )
var ( var (
@ -218,9 +218,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
func TestValidateValidatorUpdates(t *testing.T) { func TestValidateValidatorUpdates(t *testing.T) {
pubkey1 := ed25519.GenPrivKey().PubKey() pubkey1 := ed25519.GenPrivKey().PubKey()
pubkey2 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey()
pk1, err := cryptoenc.PubKeyToProto(pubkey1)
pk1, err := encoding.PubKeyToProto(pubkey1)
assert.NoError(t, err) assert.NoError(t, err)
pk2, err := cryptoenc.PubKeyToProto(pubkey2)
pk2, err := encoding.PubKeyToProto(pubkey2)
assert.NoError(t, err) assert.NoError(t, err)
defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}}
@ -278,9 +278,9 @@ func TestUpdateValidators(t *testing.T) {
pubkey2 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey()
val2 := types.NewValidator(pubkey2, 20) val2 := types.NewValidator(pubkey2, 20)
pk, err := cryptoenc.PubKeyToProto(pubkey1)
pk, err := encoding.PubKeyToProto(pubkey1)
require.NoError(t, err) require.NoError(t, err)
pk2, err := cryptoenc.PubKeyToProto(pubkey2)
pk2, err := encoding.PubKeyToProto(pubkey2)
require.NoError(t, err) require.NoError(t, err)
testCases := []struct { testCases := []struct {
@ -385,7 +385,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
pubkey := ed25519.GenPrivKey().PubKey() pubkey := ed25519.GenPrivKey().PubKey()
pk, err := cryptoenc.PubKeyToProto(pubkey)
pk, err := encoding.PubKeyToProto(pubkey)
require.NoError(t, err) require.NoError(t, err)
app.ValidatorUpdates = []abci.ValidatorUpdate{ app.ValidatorUpdates = []abci.ValidatorUpdate{
{PubKey: pk, Power: 10}, {PubKey: pk, Power: 10},
@ -442,7 +442,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
block := sf.MakeBlock(state, 1, new(types.Commit)) block := sf.MakeBlock(state, 1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey)
vp, err := encoding.PubKeyToProto(state.Validators.Validators[0].PubKey)
require.NoError(t, err) require.NoError(t, err)
// Remove the only validator // Remove the only validator
app.ValidatorUpdates = []abci.ValidatorUpdate{ app.ValidatorUpdates = []abci.ValidatorUpdate{
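The import hunks at the top of this test move the tm-db import out of the trailing position and into the group of external modules, keeping the dbm alias used throughout the diff. A small sketch of the resulting grouping (standard library, external modules, then tendermint packages); the test body is illustrative only.

    package state_test

    import (
        "testing"

        "github.com/stretchr/testify/require"
        dbm "github.com/tendermint/tm-db"

        "github.com/tendermint/tendermint/crypto/ed25519"
    )

    func TestImportGrouping(t *testing.T) {
        // tm-db sits with the other external modules and keeps its dbm alias,
        // while first-party tendermint packages form their own group below it.
        store := dbm.NewMemDB()
        require.NotNil(t, store)
        _ = ed25519.GenPrivKey()
    }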


+ 4
- 4
internal/state/helpers_test.go

@ -11,7 +11,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/ed25519"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/proxy"
sm "github.com/tendermint/tendermint/internal/state" sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory" sf "github.com/tendermint/tendermint/internal/state/test/factory"
@ -148,11 +148,11 @@ func makeHeaderPartsResponsesValPubKeyChange(
// If the pubkey is new, remove the old and add the new. // If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0) _, val := state.NextValidators.GetByIndex(0)
if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey)
vPbPk, err := encoding.PubKeyToProto(val.PubKey)
if err != nil { if err != nil {
panic(err) panic(err)
} }
pbPk, err := cryptoenc.PubKeyToProto(pubkey)
pbPk, err := encoding.PubKeyToProto(pubkey)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -181,7 +181,7 @@ func makeHeaderPartsResponsesValPowerChange(
// If the pubkey is new, remove the old and add the new. // If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0) _, val := state.NextValidators.GetByIndex(0)
if val.VotingPower != power { if val.VotingPower != power {
vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey)
vPbPk, err := encoding.PubKeyToProto(val.PubKey)
if err != nil { if err != nil {
panic(err) panic(err)
} }


+ 3
- 2
internal/state/indexer/block/kv/kv_test.go

@ -6,15 +6,16 @@ import (
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv"
"github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
) )
func TestBlockIndexer(t *testing.T) { func TestBlockIndexer(t *testing.T) {
store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events"))
indexer := blockidxkv.New(store) indexer := blockidxkv.New(store)
require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{


+ 5
- 3
internal/state/indexer/indexer_service_test.go

@ -9,11 +9,11 @@ import (
"time" "time"
"github.com/adlio/schema" "github.com/adlio/schema"
_ "github.com/lib/pq"
dockertest "github.com/ory/dockertest" dockertest "github.com/ory/dockertest"
"github.com/ory/dockertest/docker" "github.com/ory/dockertest/docker"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
indexer "github.com/tendermint/tendermint/internal/state/indexer" indexer "github.com/tendermint/tendermint/internal/state/indexer"
@ -21,7 +21,9 @@ import (
psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
tmlog "github.com/tendermint/tendermint/libs/log" tmlog "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
// Register the Postgres database driver.
_ "github.com/lib/pq"
) )
var psqldb *sql.DB var psqldb *sql.DB
@ -55,7 +57,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
pool, err := setupDB(t) pool, err := setupDB(t)
assert.Nil(t, err) assert.Nil(t, err)
store := db.NewMemDB()
store := dbm.NewMemDB()
eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink} eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink}
assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.KVSinkEnabled(eventSinks))
assert.True(t, indexer.IndexingEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks))
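The import hunk above gives the lib/pq blank import its own comment explaining why it is there: the package is imported only for its init side effect of registering a driver with database/sql. A runnable sketch of the same pattern; the connection string is a placeholder.

    package main

    import (
        "database/sql"
        "fmt"

        // Register the Postgres database driver with database/sql.
        _ "github.com/lib/pq"
    )

    func main() {
        // sql.Open only needs the driver name that the blank import registered;
        // no connection is made until the pool is first used.
        db, err := sql.Open("postgres", "postgres://localhost:5432/example?sslmode=disable")
        fmt.Println(db != nil, err)
    }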


+ 2
- 1
internal/state/indexer/sink/kv/kv.go

@ -3,13 +3,14 @@ package kv
import ( import (
"context" "context"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer"
kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv"
kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv"
"github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
) )
var _ indexer.EventSink = (*EventSink)(nil) var _ indexer.EventSink = (*EventSink)(nil)


+ 9
- 8
internal/state/indexer/sink/kv/kv_test.go

@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"testing" "testing"
dbm "github.com/tendermint/tm-db"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -13,21 +15,20 @@ import (
kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv"
"github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
) )
func TestType(t *testing.T) { func TestType(t *testing.T) {
kvSink := NewEventSink(db.NewMemDB())
kvSink := NewEventSink(dbm.NewMemDB())
assert.Equal(t, indexer.KV, kvSink.Type()) assert.Equal(t, indexer.KV, kvSink.Type())
} }
func TestStop(t *testing.T) { func TestStop(t *testing.T) {
kvSink := NewEventSink(db.NewMemDB())
kvSink := NewEventSink(dbm.NewMemDB())
assert.Nil(t, kvSink.Stop()) assert.Nil(t, kvSink.Stop())
} }
func TestBlockFuncs(t *testing.T) { func TestBlockFuncs(t *testing.T) {
store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events"))
indexer := NewEventSink(store) indexer := NewEventSink(store)
require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{
@ -158,7 +159,7 @@ func TestBlockFuncs(t *testing.T) {
} }
func TestTxSearchWithCancelation(t *testing.T) { func TestTxSearchWithCancelation(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(dbm.NewMemDB())
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@ -180,7 +181,7 @@ func TestTxSearchWithCancelation(t *testing.T) {
} }
func TestTxSearchDeprecatedIndexing(t *testing.T) { func TestTxSearchDeprecatedIndexing(t *testing.T) {
esdb := db.NewMemDB()
esdb := dbm.NewMemDB()
indexer := NewEventSink(esdb) indexer := NewEventSink(esdb)
// index tx using events indexing (composite key) // index tx using events indexing (composite key)
@ -260,7 +261,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
} }
func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(dbm.NewMemDB())
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@ -282,7 +283,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
} }
func TestTxSearchMultipleTxs(t *testing.T) { func TestTxSearchMultipleTxs(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(dbm.NewMemDB())
// indexed first, but bigger height (to test the order of transactions) // indexed first, but bigger height (to test the order of transactions)
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{


+ 8
- 9
internal/state/indexer/tx/kv/kv_test.go

@ -10,8 +10,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
db "github.com/tendermint/tm-db"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
indexer "github.com/tendermint/tendermint/internal/state/indexer" indexer "github.com/tendermint/tendermint/internal/state/indexer"
@ -21,7 +20,7 @@ import (
) )
func TestTxIndex(t *testing.T) { func TestTxIndex(t *testing.T) {
txIndexer := NewTxIndex(db.NewMemDB())
txIndexer := NewTxIndex(dbm.NewMemDB())
tx := types.Tx("HELLO WORLD") tx := types.Tx("HELLO WORLD")
txResult := &abci.TxResult{ txResult := &abci.TxResult{
@ -67,7 +66,7 @@ func TestTxIndex(t *testing.T) {
} }
func TestTxSearch(t *testing.T) { func TestTxSearch(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(dbm.NewMemDB())
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@ -147,7 +146,7 @@ func TestTxSearch(t *testing.T) {
} }
func TestTxSearchWithCancelation(t *testing.T) { func TestTxSearchWithCancelation(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(dbm.NewMemDB())
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@ -165,7 +164,7 @@ func TestTxSearchWithCancelation(t *testing.T) {
} }
func TestTxSearchDeprecatedIndexing(t *testing.T) { func TestTxSearchDeprecatedIndexing(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(dbm.NewMemDB())
// index tx using events indexing (composite key) // index tx using events indexing (composite key)
txResult1 := txResultWithEvents([]abci.Event{ txResult1 := txResultWithEvents([]abci.Event{
@ -244,7 +243,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
} }
func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(dbm.NewMemDB())
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@ -266,7 +265,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
} }
func TestTxSearchMultipleTxs(t *testing.T) { func TestTxSearchMultipleTxs(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(dbm.NewMemDB())
// indexed first, but bigger height (to test the order of transactions) // indexed first, but bigger height (to test the order of transactions)
txResult := txResultWithEvents([]abci.Event{ txResult := txResultWithEvents([]abci.Event{
@ -339,7 +338,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) {
require.NoError(b, err) require.NoError(b, err)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store, err := db.NewDB("tx_index", "goleveldb", dir)
store, err := dbm.NewDB("tx_index", "goleveldb", dir)
require.NoError(b, err) require.NoError(b, err)
txIndexer := NewTxIndex(store) txIndexer := NewTxIndex(store)
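The removed lines near the top of this file show tm-db imported twice, once as db and once as dbm; Go allows that, but every call site then depends on which name the author happened to reach for. The hunks settle on the single dbm alias, as in this small sketch:

    package kv_test

    import (
        "testing"

        dbm "github.com/tendermint/tm-db"
    )

    func TestSingleAlias(t *testing.T) {
        // Both the in-memory store and the prefixed store read as dbm.* now
        // that the duplicate db alias is gone.
        store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("tx_index"))
        if store == nil {
            t.Fatal("expected a prefixed store")
        }
    }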


+ 25
- 26
internal/state/state_test.go

@ -12,36 +12,35 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/ed25519"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
sm "github.com/tendermint/tendermint/internal/state" sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory"
statefactory "github.com/tendermint/tendermint/internal/state/test/factory"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
// setupTestCase does setup common to all test cases. // setupTestCase does setup common to all test cases.
func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
config := cfg.ResetTestRoot("state_")
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
cfg := config.ResetTestRoot("state_")
dbType := dbm.BackendType(cfg.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
require.NoError(t, err) require.NoError(t, err)
stateStore := sm.NewStore(stateDB) stateStore := sm.NewStore(stateDB)
state, err := stateStore.Load() state, err := stateStore.Load()
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, state) require.Empty(t, state)
state, err = sm.MakeGenesisStateFromFile(config.GenesisFile())
state, err = sm.MakeGenesisStateFromFile(cfg.GenesisFile())
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, state) assert.NotNil(t, state)
err = stateStore.Save(state) err = stateStore.Save(state)
require.NoError(t, err) require.NoError(t, err)
tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
tearDown := func(t *testing.T) { os.RemoveAll(cfg.RootDir) }
return tearDown, stateDB, state return tearDown, stateDB, state
} }
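setupTestCase above shows the mirror image of the executor rename: here the package keeps its own name, config, and it is the local variable that becomes cfg so that later package-qualified calls still resolve. A short sketch, assuming the tendermint module; the cleanup step stands in for the tearDown closure in the real test.

    package state_test

    import (
        "os"
        "testing"

        "github.com/tendermint/tendermint/config"
    )

    func TestNoShadow(t *testing.T) {
        cfg := config.ResetTestRoot("state_")
        t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })

        // If the variable above were named config, this package-qualified call
        // would be shadowed and would not compile.
        _ = config.DefaultP2PConfig()
    }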
@ -106,7 +105,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
state.LastBlockHeight++ state.LastBlockHeight++
// Build mock responses. // Build mock responses.
block := sf.MakeBlock(state, 2, new(types.Commit))
block := statefactory.MakeBlock(state, 2, new(types.Commit))
abciResponses := new(tmstate.ABCIResponses) abciResponses := new(tmstate.ABCIResponses)
dtxs := make([]*abci.ResponseDeliverTx, 2) dtxs := make([]*abci.ResponseDeliverTx, 2)
@ -114,7 +113,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil}
abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil}
pbpk, err := cryptoenc.PubKeyToProto(ed25519.GenPrivKey().PubKey())
pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey())
require.NoError(t, err) require.NoError(t, err)
abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}} abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}}
@ -448,7 +447,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
// NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1
assert.EqualValues(t, 0, val1.ProposerPriority) assert.EqualValues(t, 0, val1.ProposerPriority)
block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
abciResponses := &tmstate.ABCIResponses{ abciResponses := &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{}, BeginBlock: &abci.ResponseBeginBlock{},
@ -465,7 +464,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
// add a validator // add a validator
val2PubKey := ed25519.GenPrivKey().PubKey() val2PubKey := ed25519.GenPrivKey().PubKey()
val2VotingPower := int64(100) val2VotingPower := int64(100)
fvp, err := cryptoenc.PubKeyToProto(val2PubKey)
fvp, err := encoding.PubKeyToProto(val2PubKey)
require.NoError(t, err) require.NoError(t, err)
updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower}
@ -562,7 +561,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
// we only have one validator: // we only have one validator:
assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address)
block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
// no updates: // no updates:
abciResponses := &tmstate.ABCIResponses{ abciResponses := &tmstate.ABCIResponses{
@ -583,7 +582,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
// add a validator with the same voting power as the first // add a validator with the same voting power as the first
val2PubKey := ed25519.GenPrivKey().PubKey() val2PubKey := ed25519.GenPrivKey().PubKey()
fvp, err := cryptoenc.PubKeyToProto(val2PubKey)
fvp, err := encoding.PubKeyToProto(val2PubKey)
require.NoError(t, err) require.NoError(t, err)
updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower}
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
@ -749,7 +748,7 @@ func TestLargeGenesisValidator(t *testing.T) {
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err) require.NoError(t, err)
block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
@ -769,7 +768,7 @@ func TestLargeGenesisValidator(t *testing.T) {
// see: https://github.com/tendermint/tendermint/issues/2960 // see: https://github.com/tendermint/tendermint/issues/2960
firstAddedValPubKey := ed25519.GenPrivKey().PubKey() firstAddedValPubKey := ed25519.GenPrivKey().PubKey()
firstAddedValVotingPower := int64(10) firstAddedValVotingPower := int64(10)
fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey)
fvp, err := encoding.PubKeyToProto(firstAddedValPubKey)
require.NoError(t, err) require.NoError(t, err)
firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower}
validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal})
@ -778,7 +777,7 @@ func TestLargeGenesisValidator(t *testing.T) {
BeginBlock: &abci.ResponseBeginBlock{}, BeginBlock: &abci.ResponseBeginBlock{},
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}},
} }
block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err) require.NoError(t, err)
@ -793,7 +792,7 @@ func TestLargeGenesisValidator(t *testing.T) {
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err) require.NoError(t, err)
block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates)
@ -816,7 +815,7 @@ func TestLargeGenesisValidator(t *testing.T) {
// add 10 validators with the same voting power as the one added directly after genesis: // add 10 validators with the same voting power as the one added directly after genesis:
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
addedPubKey := ed25519.GenPrivKey().PubKey() addedPubKey := ed25519.GenPrivKey().PubKey()
ap, err := cryptoenc.PubKeyToProto(addedPubKey)
ap, err := encoding.PubKeyToProto(addedPubKey)
require.NoError(t, err) require.NoError(t, err)
addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower}
validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal})
@ -826,7 +825,7 @@ func TestLargeGenesisValidator(t *testing.T) {
BeginBlock: &abci.ResponseBeginBlock{}, BeginBlock: &abci.ResponseBeginBlock{},
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}},
} }
block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err) require.NoError(t, err)
@ -834,14 +833,14 @@ func TestLargeGenesisValidator(t *testing.T) {
require.Equal(t, 10+2, len(state.NextValidators.Validators)) require.Equal(t, 10+2, len(state.NextValidators.Validators))
// remove genesis validator: // remove genesis validator:
gp, err := cryptoenc.PubKeyToProto(genesisPubKey)
gp, err := encoding.PubKeyToProto(genesisPubKey)
require.NoError(t, err) require.NoError(t, err)
removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0}
abciResponses = &tmstate.ABCIResponses{ abciResponses = &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{}, BeginBlock: &abci.ResponseBeginBlock{},
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}},
} }
block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err) require.NoError(t, err)
@ -862,7 +861,7 @@ func TestLargeGenesisValidator(t *testing.T) {
} }
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err) require.NoError(t, err)
block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit))
block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit))
blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err) require.NoError(t, err)
@ -887,7 +886,7 @@ func TestLargeGenesisValidator(t *testing.T) {
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err) require.NoError(t, err)
block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit))
block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit))
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
@ -982,7 +981,7 @@ func TestStateMakeBlock(t *testing.T) {
proposerAddress := state.Validators.GetProposer().Address proposerAddress := state.Validators.GetProposer().Address
stateVersion := state.Version.Consensus stateVersion := state.Version.Consensus
block := sf.MakeBlock(state, 2, new(types.Commit))
block := statefactory.MakeBlock(state, 2, new(types.Commit))
// test we set some fields // test we set some fields
assert.Equal(t, stateVersion, block.Version) assert.Equal(t, stateVersion, block.Version)
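
The tests above build ABCI validator updates by converting ed25519 keys with the renamed encoding package. As a quick orientation, here is a minimal standalone usage sketch; it assumes a module that already requires github.com/tendermint/tendermint at a version matching this branch and is not part of the patch itself.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding"
)

// Convert a freshly generated ed25519 public key to its protobuf form,
// the same call the tests above make when assembling validator updates.
func main() {
	pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey())
	if err != nil {
		panic(err)
	}
	fmt.Printf("proto public key: %T\n", pbpk)
}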


internal/state/store_test.go (+6, -7)

@@ -7,11 +7,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
sm "github.com/tendermint/tendermint/internal/state"

@@ -102,13 +101,13 @@ func TestStoreLoadValidators(t *testing.T) {
func BenchmarkLoadValidators(b *testing.B) {
const valSetSize = 100
- config := cfg.ResetTestRoot("state_")
- defer os.RemoveAll(config.RootDir)
- dbType := dbm.BackendType(config.DBBackend)
- stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
+ cfg := config.ResetTestRoot("state_")
+ defer os.RemoveAll(cfg.RootDir)
+ dbType := dbm.BackendType(cfg.DBBackend)
+ stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
require.NoError(b, err)
stateStore := sm.NewStore(stateDB)
- state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
+ state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
if err != nil {
b.Fatal(err)
}


internal/state/tx_filter.go (+5, -5)

@@ -1,22 +1,22 @@
package state
import (
- mempl "github.com/tendermint/tendermint/internal/mempool"
+ "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/types"
)
// TxPreCheck returns a function to filter transactions before processing.
// The function limits the size of a transaction to the block's maximum data size.
- func TxPreCheck(state State) mempl.PreCheckFunc {
+ func TxPreCheck(state State) mempool.PreCheckFunc {
maxDataBytes := types.MaxDataBytesNoEvidence(
state.ConsensusParams.Block.MaxBytes,
state.Validators.Size(),
)
- return mempl.PreCheckMaxBytes(maxDataBytes)
+ return mempool.PreCheckMaxBytes(maxDataBytes)
}
// TxPostCheck returns a function to filter transactions after processing.
// The function limits the gas wanted by a transaction to the block's maximum total gas.
- func TxPostCheck(state State) mempl.PostCheckFunc {
- return mempl.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)
+ func TxPostCheck(state State) mempool.PostCheckFunc {
+ return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)
}
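
For context on the functions touched here, the value returned by TxPreCheck is just a closure over the block's data budget. Below is a self-contained sketch of that shape using stand-in types rather than the real types.Tx and mempool.PreCheckFunc; it is an illustration, not the library's implementation.

package main

import "fmt"

// tx and preCheckFunc stand in for types.Tx and mempool.PreCheckFunc.
type tx []byte

type preCheckFunc func(tx) error

// preCheckMaxBytes mirrors the idea of PreCheckMaxBytes: reject any
// transaction larger than the configured byte budget before it enters the pool.
func preCheckMaxBytes(maxBytes int64) preCheckFunc {
	return func(t tx) error {
		if int64(len(t)) > maxBytes {
			return fmt.Errorf("tx of %d bytes exceeds limit of %d", len(t), maxBytes)
		}
		return nil
	}
}

func main() {
	check := preCheckMaxBytes(8)
	fmt.Println(check(tx("small")))         // <nil>
	fmt.Println(check(tx("far too large"))) // error: 13 bytes exceeds the 8-byte budget
}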

internal/state/validation_test.go (+10, -10)

@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
+ dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"

@@ -15,14 +16,13 @@ import (
memmock "github.com/tendermint/tendermint/internal/mempool/mock"
sm "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/state/mocks"
- sf "github.com/tendermint/tendermint/internal/state/test/factory"
- "github.com/tendermint/tendermint/internal/test/factory"
+ statefactory "github.com/tendermint/tendermint/internal/state/test/factory"
+ testfactory "github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/libs/time"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
)
const validationTestsStopHeight int64 = 10

@@ -90,7 +90,7 @@ func TestValidateBlockHeader(t *testing.T) {
Invalid blocks don't pass
*/
for _, tc := range testCases {
- block := sf.MakeBlock(state, height, lastCommit)
+ block := statefactory.MakeBlock(state, height, lastCommit)
tc.malleateBlock(block)
err := blockExec.ValidateBlock(state, block)
t.Logf("%s: %v", tc.name, err)

@@ -107,7 +107,7 @@ func TestValidateBlockHeader(t *testing.T) {
}
nextHeight := validationTestsStopHeight
- block := sf.MakeBlock(state, nextHeight, lastCommit)
+ block := statefactory.MakeBlock(state, nextHeight, lastCommit)
state.InitialHeight = nextHeight + 1
err := blockExec.ValidateBlock(state, block)
require.Error(t, err, "expected an error when state is ahead of block")

@@ -141,7 +141,7 @@ func TestValidateBlockCommit(t *testing.T) {
#2589: ensure state.LastValidators.VerifyCommit fails here
*/
// should be height-1 instead of height
- wrongHeightVote, err := factory.MakeVote(
+ wrongHeightVote, err := testfactory.MakeVote(
privVals[proposerAddr.String()],
chainID,
1,

@@ -158,7 +158,7 @@ func TestValidateBlockCommit(t *testing.T) {
state.LastBlockID,
[]types.CommitSig{wrongHeightVote.CommitSig()},
)
- block := sf.MakeBlock(state, height, wrongHeightCommit)
+ block := statefactory.MakeBlock(state, height, wrongHeightCommit)
err = blockExec.ValidateBlock(state, block)
_, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight)
require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err)

@@ -166,7 +166,7 @@ func TestValidateBlockCommit(t *testing.T) {
/*
#2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size()
*/
- block = sf.MakeBlock(state, height, wrongSigsCommit)
+ block = statefactory.MakeBlock(state, height, wrongSigsCommit)
err = blockExec.ValidateBlock(state, block)
_, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures)
require.True(t, isErrInvalidCommitSignatures,

@@ -195,7 +195,7 @@ func TestValidateBlockCommit(t *testing.T) {
/*
wrongSigsCommit is fine except for the extra bad precommit
*/
- goodVote, err := factory.MakeVote(
+ goodVote, err := testfactory.MakeVote(
privVals[proposerAddr.String()],
chainID,
1,

@@ -278,7 +278,7 @@ func TestValidateBlockEvidence(t *testing.T) {
evidence = append(evidence, newEv)
currentBytes += int64(len(newEv.Bytes()))
}
- block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr)
+ block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr)
err := blockExec.ValidateBlock(state, block)
if assert.Error(t, err) {
_, ok := err.(*types.ErrEvidenceOverflow)


internal/statesync/dispatcher.go (+2, -2)

@@ -9,7 +9,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/light/provider"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
- proto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

@@ -109,7 +109,7 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh
// Respond allows the underlying process which receives requests on the
// requestCh to respond with the respective light block. A nil response is used to
// represent that the receiver of the request does not have a light block at that height.
- func (d *Dispatcher) Respond(lb *proto.LightBlock, peer types.NodeID) error {
+ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error {
d.mtx.Lock()
defer d.mtx.Unlock()
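
The alias change above is driven by a name clash: proto/tendermint/types and tendermint/types share the base name types, so the protobuf import carries the tmproto alias. A standard-library sketch of the same situation (not from the patch):

package main

import (
	crand "crypto/rand" // aliased: the base name clashes with math/rand below
	"fmt"
	"math/rand"
)

func main() {
	buf := make([]byte, 4)
	if _, err := crand.Read(buf); err != nil { // crypto-quality randomness
		panic(err)
	}
	fmt.Println(buf, rand.Intn(100)) // math/rand keeps its natural name
}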


internal/test/factory/genesis.go (+3, -3)

@@ -3,13 +3,13 @@ package factory
import (
"sort"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/types"
)
func RandGenesisDoc(
- config *cfg.Config,
+ cfg *config.Config,
numValidators int,
randPower bool,
minPower int64) (*types.GenesisDoc, []types.PrivValidator) {

@@ -29,7 +29,7 @@ func RandGenesisDoc(
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
InitialHeight: 1,
- ChainID: config.ChainID(),
+ ChainID: cfg.ChainID(),
Validators: validators,
}, privValidators
}
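
The parameter rename in RandGenesisDoc (config to cfg) keeps the now-unaliased config package reachable inside the function body. The same shadowing rule, shown with only the standard library (not from the patch):

package main

import (
	"fmt"
	"strings"
)

// If this parameter were named strings, it would shadow the strings package
// and the strings.ToUpper call below would not compile; a short local name
// avoids the clash, which is why the factory function now takes cfg.
func shout(s string) string {
	return strings.ToUpper(s) + "!"
}

func main() {
	fmt.Println(shout("import aliasing"))
}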

light/client_test.go (+0, -1)

@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/internal/test/factory"


light/provider/http/http.go (+3, -3)

@@ -12,7 +12,7 @@ import (
"github.com/tendermint/tendermint/light/provider"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types"
)

@@ -300,11 +300,11 @@ func (p *http) noBlock(e error) error {
func (p *http) parseRPCError(e *rpctypes.RPCError) error {
switch {
// 1) check if the error indicates that the peer doesn't have the block
- case strings.Contains(e.Data, ctypes.ErrHeightNotAvailable.Error()):
+ case strings.Contains(e.Data, coretypes.ErrHeightNotAvailable.Error()):
return p.noBlock(provider.ErrLightBlockNotFound)
// 2) check if the height requested is too high
- case strings.Contains(e.Data, ctypes.ErrHeightExceedsChainHead.Error()):
+ case strings.Contains(e.Data, coretypes.ErrHeightExceedsChainHead.Error()):
return p.noBlock(provider.ErrHeightTooHigh)
// 3) check if the provider closed the connection
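
parseRPCError above classifies failures by matching sentinel error text inside the RPC error's Data field. A standalone sketch of that classification step, with a stand-in sentinel rather than the real coretypes message:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errHeightNotAvailable = errors.New("height is not available")

// classify maps a raw error payload onto a coarse category by substring
// match, mirroring the switch in parseRPCError.
func classify(data string) string {
	switch {
	case strings.Contains(data, errHeightNotAvailable.Error()):
		return "light block not found"
	default:
		return "unexpected error"
	}
}

func main() {
	fmt.Println(classify("rpc error: height is not available yet"))
}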


light/proxy/routes.go (+51, -51)

@@ -4,7 +4,7 @@ import (
"github.com/tendermint/tendermint/libs/bytes"
lrpc "github.com/tendermint/tendermint/light/rpc"
rpcclient "github.com/tendermint/tendermint/rpc/client"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types"

@@ -52,91 +52,91 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
}
}

- type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error)
+ type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error)
func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) {
return c.Health(ctx.Context())
}
}

- type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)
+ type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error)
// nolint: interfacer
func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) {
return c.Status(ctx.Context())
}
}

- type rpcNetInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error)
+ type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error)
func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
return c.NetInfo(ctx.Context())
}
}

- type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+ type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) //nolint:lll
func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc {
- return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+ return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) {
return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight)
}
}

- type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error)
+ type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error)
func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
return c.Genesis(ctx.Context())
}
}

- type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error)
+ type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error)
func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc {
- return func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
+ return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) {
return c.GenesisChunked(ctx.Context(), chunk)
}
}

- type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error)
+ type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error)
func makeBlockFunc(c *lrpc.Client) rpcBlockFunc {
- return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
+ return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) {
return c.Block(ctx.Context(), height)
}
}

- type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error)
+ type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error)
func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc {
- return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
+ return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) {
return c.BlockByHash(ctx.Context(), hash)
}
}

- type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error)
+ type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error)
func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc {
- return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) {
+ return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) {
return c.BlockResults(ctx.Context(), height)
}
}

- type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error)
+ type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error)
func makeCommitFunc(c *lrpc.Client) rpcCommitFunc {
- return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
+ return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) {
return c.Commit(ctx.Context(), height)
}
}

- type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)
+ type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error)
func makeTxFunc(c *lrpc.Client) rpcTxFunc {
- return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
+ return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) {
return c.Tx(ctx.Context(), hash, prove)
}
}

@@ -147,7 +147,7 @@ type rpcTxSearchFunc func(
prove bool,
page, perPage *int,
orderBy string,
- ) (*ctypes.ResultTxSearch, error)
+ ) (*coretypes.ResultTxSearch, error)
func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
return func(

@@ -156,7 +156,7 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
prove bool,
page, perPage *int,
orderBy string,
- ) (*ctypes.ResultTxSearch, error) {
+ ) (*coretypes.ResultTxSearch, error) {
return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy)
}
}

@@ -167,7 +167,7 @@ type rpcBlockSearchFunc func(
prove bool,
page, perPage *int,
orderBy string,
- ) (*ctypes.ResultBlockSearch, error)
+ ) (*coretypes.ResultBlockSearch, error)
func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc {
return func(

@@ -176,90 +176,90 @@ func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc {
prove bool,
page, perPage *int,
orderBy string,
- ) (*ctypes.ResultBlockSearch, error) {
+ ) (*coretypes.ResultBlockSearch, error) {
return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy)
}
}

type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64,
- page, perPage *int) (*ctypes.ResultValidators, error)
+ page, perPage *int) (*coretypes.ResultValidators, error)
func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc {
- return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
+ return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) {
return c.Validators(ctx.Context(), height, page, perPage)
}
}

- type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error)
+ type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error)
func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) {
return c.DumpConsensusState(ctx.Context())
}
}

- type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error)
+ type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error)
func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) {
return c.ConsensusState(ctx.Context())
}
}

- type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error)
+ type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error)
func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc {
- return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+ return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
return c.ConsensusParams(ctx.Context(), height)
}
}

- type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error)
+ type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error)
func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc {
- return func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
+ return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
return c.UnconfirmedTxs(ctx.Context(), limit)
}
}

- type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error)
+ type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error)
func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) {
return c.NumUnconfirmedTxs(ctx.Context())
}
}

- type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
+ type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error)
func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc {
- return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+ return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
return c.BroadcastTxCommit(ctx.Context(), tx)
}
}

- type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+ type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error)
func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc {
- return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
return c.BroadcastTxSync(ctx.Context(), tx)
}
}

- type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+ type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error)
func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc {
- return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
return c.BroadcastTxAsync(ctx.Context(), tx)
}
}

type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string,
- data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error)
+ data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error)
func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
return func(ctx *rpctypes.Context, path string, data bytes.HexBytes,
- height int64, prove bool) (*ctypes.ResultABCIQuery, error) {
+ height int64, prove bool) (*coretypes.ResultABCIQuery, error) {
return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{
Height: height,

@@ -268,19 +268,19 @@ func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
}
}

- type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error)
+ type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error)
func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {
- return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
+ return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) {
return c.ABCIInfo(ctx.Context())
}
}

- type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+ type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error)
// nolint: interfacer
func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
- return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+ return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
return c.BroadcastEvidence(ctx.Context(), ev)
}
}
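
Every route in this file follows the same shape: a named function type plus a make...Func constructor that captures the light client in a closure. A reduced, self-contained sketch of that pattern with invented names (it is not the proxy's actual client or result types):

package main

import (
	"context"
	"fmt"
)

// lightClient stands in for *lrpc.Client.
type lightClient struct{}

func (lightClient) Health(ctx context.Context) (string, error) { return "ok", nil }

// rpcHealthFunc is the route's function type; makeHealthFunc adapts the
// client method to it by capturing the client.
type rpcHealthFunc func(ctx context.Context) (string, error)

func makeHealthFunc(c lightClient) rpcHealthFunc {
	return func(ctx context.Context) (string, error) { return c.Health(ctx) }
}

func main() {
	routes := map[string]rpcHealthFunc{"health": makeHealthFunc(lightClient{})}
	out, err := routes["health"](context.Background())
	fmt.Println(out, err)
}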

light/rpc/client.go (+44, -44)

@ -16,7 +16,7 @@ import (
tmmath "github.com/tendermint/tendermint/libs/math" tmmath "github.com/tendermint/tendermint/libs/math"
service "github.com/tendermint/tendermint/libs/service" service "github.com/tendermint/tendermint/libs/service"
rpcclient "github.com/tendermint/tendermint/rpc/client" rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -113,22 +113,22 @@ func (c *Client) OnStop() {
} }
} }
func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
return c.next.Status(ctx) return c.next.Status(ctx)
} }
func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
return c.next.ABCIInfo(ctx) return c.next.ABCIInfo(ctx)
} }
// ABCIQuery requests proof by default. // ABCIQuery requests proof by default.
func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { //nolint:lll
return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions)
} }
// ABCIQueryWithOptions returns an error if opts.Prove is false. // ABCIQueryWithOptions returns an error if opts.Prove is false.
func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes,
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
// always request the proof // always request the proof
opts.Prove = true opts.Prove = true
@ -150,7 +150,7 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb
return nil, errors.New("no proof ops") return nil, errors.New("no proof ops")
} }
if resp.Height <= 0 { if resp.Height <= 0 {
return nil, ctypes.ErrZeroOrNegativeHeight
return nil, coretypes.ErrZeroOrNegativeHeight
} }
// Update the light client if we're behind. // Update the light client if we're behind.
@ -185,46 +185,46 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb
} }
} }
return &ctypes.ResultABCIQuery{Response: resp}, nil
return &coretypes.ResultABCIQuery{Response: resp}, nil
} }
func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
return c.next.BroadcastTxCommit(ctx, tx) return c.next.BroadcastTxCommit(ctx, tx)
} }
func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
return c.next.BroadcastTxAsync(ctx, tx) return c.next.BroadcastTxAsync(ctx, tx)
} }
func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
return c.next.BroadcastTxSync(ctx, tx) return c.next.BroadcastTxSync(ctx, tx)
} }
func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
return c.next.UnconfirmedTxs(ctx, limit) return c.next.UnconfirmedTxs(ctx, limit)
} }
func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
return c.next.NumUnconfirmedTxs(ctx) return c.next.NumUnconfirmedTxs(ctx)
} }
func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
return c.next.CheckTx(ctx, tx) return c.next.CheckTx(ctx, tx)
} }
func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
func (c *Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
return c.next.NetInfo(ctx) return c.next.NetInfo(ctx)
} }
func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
func (c *Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
return c.next.DumpConsensusState(ctx) return c.next.DumpConsensusState(ctx)
} }
func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
func (c *Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
return c.next.ConsensusState(ctx) return c.next.ConsensusState(ctx)
} }
func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
res, err := c.next.ConsensusParams(ctx, height) res, err := c.next.ConsensusParams(ctx, height)
if err != nil { if err != nil {
return nil, err return nil, err
@ -235,7 +235,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re
return nil, err return nil, err
} }
if res.BlockHeight <= 0 { if res.BlockHeight <= 0 {
return nil, ctypes.ErrZeroOrNegativeHeight
return nil, coretypes.ErrZeroOrNegativeHeight
} }
// Update the light client if we're behind. // Update the light client if we're behind.
@ -253,13 +253,13 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re
return res, nil return res, nil
} }
func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
return c.next.Health(ctx) return c.next.Health(ctx)
} }
// BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header // BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header
// returned. // returned.
func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight) res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight)
if err != nil { if err != nil {
return nil, err return nil, err
@ -298,16 +298,16 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64)
return res, nil return res, nil
} }
func (c *Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
func (c *Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
return c.next.Genesis(ctx) return c.next.Genesis(ctx)
} }
func (c *Client) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
func (c *Client) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
return c.next.GenesisChunked(ctx, id) return c.next.GenesisChunked(ctx, id)
} }
// Block calls rpcclient#Block and then verifies the result. // Block calls rpcclient#Block and then verifies the result.
func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
func (c *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
res, err := c.next.Block(ctx, height) res, err := c.next.Block(ctx, height)
if err != nil { if err != nil {
return nil, err return nil, err
@ -341,7 +341,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock,
} }
// BlockByHash calls rpcclient#BlockByHash and then verifies the result. // BlockByHash calls rpcclient#BlockByHash and then verifies the result.
func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctypes.ResultBlock, error) {
func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*coretypes.ResultBlock, error) {
res, err := c.next.BlockByHash(ctx, hash) res, err := c.next.BlockByHash(ctx, hash)
if err != nil { if err != nil {
return nil, err return nil, err
@ -376,7 +376,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctype
// BlockResults returns the block results for the given height. If no height is // BlockResults returns the block results for the given height. If no height is
// provided, the results of the block preceding the latest are returned. // provided, the results of the block preceding the latest are returned.
func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) {
var h int64 var h int64
if height == nil { if height == nil {
res, err := c.next.Status(ctx) res, err := c.next.Status(ctx)
@ -397,7 +397,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
// Validate res. // Validate res.
if res.Height <= 0 { if res.Height <= 0 {
return nil, ctypes.ErrZeroOrNegativeHeight
return nil, coretypes.ErrZeroOrNegativeHeight
} }
// Update the light client if we're behind. // Update the light client if we're behind.
@ -438,7 +438,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
return res, nil return res, nil
} }
func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
// Update the light client if we're behind and retrieve the light block at the requested height // Update the light client if we're behind and retrieve the light block at the requested height
// or at the latest height if no height is provided. // or at the latest height if no height is provided.
l, err := c.updateLightClientIfNeededTo(ctx, height) l, err := c.updateLightClientIfNeededTo(ctx, height)
@ -446,7 +446,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi
return nil, err return nil, err
} }
return &ctypes.ResultCommit{
return &coretypes.ResultCommit{
SignedHeader: *l.SignedHeader, SignedHeader: *l.SignedHeader,
CanonicalCommit: true, CanonicalCommit: true,
}, nil }, nil
@ -454,7 +454,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi
// Tx calls rpcclient#Tx method and then verifies the proof if such was // Tx calls rpcclient#Tx method and then verifies the proof if such was
// requested. // requested.
func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
res, err := c.next.Tx(ctx, hash, prove) res, err := c.next.Tx(ctx, hash, prove)
if err != nil || !prove { if err != nil || !prove {
return res, err return res, err
@ -462,7 +462,7 @@ func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ct
// Validate res. // Validate res.
if res.Height <= 0 { if res.Height <= 0 {
return nil, ctypes.ErrZeroOrNegativeHeight
return nil, coretypes.ErrZeroOrNegativeHeight
} }
// Update the light client if we're behind. // Update the light client if we're behind.
@ -481,7 +481,7 @@ func (c *Client) TxSearch(
prove bool, prove bool,
page, perPage *int, page, perPage *int,
orderBy string, orderBy string,
) (*ctypes.ResultTxSearch, error) {
) (*coretypes.ResultTxSearch, error) {
return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy) return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy)
} }
@ -490,7 +490,7 @@ func (c *Client) BlockSearch(
query string, query string,
page, perPage *int, page, perPage *int,
orderBy string, orderBy string,
) (*ctypes.ResultBlockSearch, error) {
) (*coretypes.ResultBlockSearch, error) {
return c.next.BlockSearch(ctx, query, page, perPage, orderBy) return c.next.BlockSearch(ctx, query, page, perPage, orderBy)
} }
@ -499,7 +499,7 @@ func (c *Client) Validators(
ctx context.Context, ctx context.Context,
height *int64, height *int64,
pagePtr, perPagePtr *int, pagePtr, perPagePtr *int,
) (*ctypes.ResultValidators, error) {
) (*coretypes.ResultValidators, error) {
// Update the light client if we're behind and retrieve the light block at the
// requested height or at the latest height if no height is provided.

@@ -518,19 +518,19 @@ func (c *Client) Validators(
    skipCount := validateSkipCount(page, perPage)
    v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)]
-   return &ctypes.ResultValidators{
+   return &coretypes.ResultValidators{
        BlockHeight: l.Height,
        Validators:  v,
        Count:       len(v),
        Total:       totalCount}, nil
}

-func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
    return c.next.BroadcastEvidence(ctx, ev)
}

func (c *Client) Subscribe(ctx context.Context, subscriber, query string,
-   outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
+   outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) {
    return c.next.Subscribe(ctx, subscriber, query, outCapacity...)
}

@@ -565,7 +565,7 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) {
// SubscribeWS subscribes for events using the given query and remote address as
// a subscriber, but does not verify responses (UNSAFE)!
// TODO: verify data
-func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
+func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) {
    out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query)
    if err != nil {
        return nil, err

@@ -588,27 +588,27 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.Resul
        }
    }()

-   return &ctypes.ResultSubscribe{}, nil
+   return &coretypes.ResultSubscribe{}, nil
}

// UnsubscribeWS calls original client's Unsubscribe using remote address as a
// subscriber.
-func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
+func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) {
    err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query)
    if err != nil {
        return nil, err
    }
-   return &ctypes.ResultUnsubscribe{}, nil
+   return &coretypes.ResultUnsubscribe{}, nil
}

// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address
// as a subscriber.
-func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
+func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) {
    err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr())
    if err != nil {
        return nil, err
    }
-   return &ctypes.ResultUnsubscribe{}, nil
+   return &coretypes.ResultUnsubscribe{}, nil
}

// XXX: Copied from rpc/core/env.go

@@ -620,7 +620,7 @@ const (
func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
    if perPage < 1 {
-       panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage))
+       panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage))
    }

    if pagePtr == nil { // no page parameter

@@ -633,7 +633,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
    }

    page := *pagePtr
    if page <= 0 || page > pages {
-       return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page)
+       return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page)
    }

    return page, nil
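These hunks trade the terse ctypes alias for the longer coretypes. The sketch below is a standalone illustration of that naming style, built only from standard-library packages (nothing here comes from this repository): when two imports would otherwise collide on the same base name, each gets a readable alias rather than a one- or two-letter abbreviation.

package main

import (
	cryptorand "crypto/rand"
	"fmt"
	mathrand "math/rand"
)

func main() {
	// Both packages share the base name "rand", so each import carries a
	// descriptive alias instead of a cryptic abbreviation.
	buf := make([]byte, 8)
	if _, err := cryptorand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("crypto/rand: %x\n", buf)

	r := mathrand.New(mathrand.NewSource(1))
	fmt.Println("math/rand:", r.Intn(100))
}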


+0 -1  light/store/db/db_test.go

@@ -7,7 +7,6 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"
    "github.com/tendermint/tendermint/crypto"


+120 -118  node/node.go

@@ -6,19 +6,17 @@ import (
    "fmt"
    "net"
    "net/http"
-   _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    "strconv"
    "time"
-   _ "github.com/lib/pq" // provide the psql db driver
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/rs/cors"
    abciclient "github.com/tendermint/tendermint/abci/client"
    abci "github.com/tendermint/tendermint/abci/types"
-   cfg "github.com/tendermint/tendermint/config"
+   "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/crypto"
-   cs "github.com/tendermint/tendermint/internal/consensus"
+   "github.com/tendermint/tendermint/internal/consensus"
    "github.com/tendermint/tendermint/internal/mempool"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/pex"

@@ -38,6 +36,10 @@ import (
    rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
+   _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
+   _ "github.com/lib/pq" // provide the psql db driver
)

// nodeImpl is the highest level interface to a full Tendermint node.
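The second half of this hunk moves the blank (side-effecting) imports into their own commented group. Below is a minimal, self-contained sketch of that convention, using only the standard library; it is an illustration of the pattern, not code from this repository.

package main

import (
	"fmt"
	"net/http"

	// Blank imports are kept in their own commented group so the side effect
	// each one provides is visible at the import site. net/http/pprof
	// registers its debug handlers on http.DefaultServeMux when imported.
	_ "net/http/pprof"
)

func main() {
	// Illustration only: with the blank import above, any server that uses
	// http.DefaultServeMux also serves /debug/pprof/.
	fmt.Println("default mux ready:", http.DefaultServeMux != nil)
}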
@ -46,7 +48,7 @@ type nodeImpl struct {
service.BaseService service.BaseService
// config // config
config *cfg.Config
config *config.Config
genesisDoc *types.GenesisDoc // initial validator set genesisDoc *types.GenesisDoc // initial validator set
privValidator types.PrivValidator // local node's validator key privValidator types.PrivValidator // local node's validator key
@ -69,7 +71,7 @@ type nodeImpl struct {
mempool mempool.Mempool mempool mempool.Mempool
stateSync bool // whether the node should state sync on startup stateSync bool // whether the node should state sync on startup
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
consensusReactor *cs.Reactor // for participating in the consensus
consensusReactor *consensus.Reactor // for participating in the consensus
pexReactor service.Service // for exchanging peer addresses pexReactor service.Service // for exchanging peer addresses
evidenceReactor service.Service evidenceReactor service.Service
rpcListeners []net.Listener // rpc servers rpcListeners []net.Listener // rpc servers
@ -81,23 +83,23 @@ type nodeImpl struct {
// newDefaultNode returns a Tendermint node with default settings for the // newDefaultNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider. // It implements NodeProvider.
func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, error) {
nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile())
func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) {
nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err)
} }
if config.Mode == cfg.ModeSeed {
return makeSeedNode(config,
cfg.DefaultDBProvider,
if cfg.Mode == config.ModeSeed {
return makeSeedNode(cfg,
config.DefaultDBProvider,
nodeKey, nodeKey,
defaultGenesisDocProviderFunc(config),
defaultGenesisDocProviderFunc(cfg),
logger, logger,
) )
} }
var pval *privval.FilePV var pval *privval.FilePV
if config.Mode == cfg.ModeValidator {
pval, err = privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
if cfg.Mode == config.ModeValidator {
pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -105,27 +107,27 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err
pval = nil pval = nil
} }
appClient, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
return makeNode(config,
appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
return makeNode(cfg,
pval, pval,
nodeKey, nodeKey,
appClient, appClient,
defaultGenesisDocProviderFunc(config),
cfg.DefaultDBProvider,
defaultGenesisDocProviderFunc(cfg),
config.DefaultDBProvider,
logger, logger,
) )
} }
// makeNode returns a new, ready to go, Tendermint Node. // makeNode returns a new, ready to go, Tendermint Node.
func makeNode(config *cfg.Config,
func makeNode(cfg *config.Config,
privValidator types.PrivValidator, privValidator types.PrivValidator,
nodeKey types.NodeKey, nodeKey types.NodeKey,
clientCreator abciclient.Creator, clientCreator abciclient.Creator,
genesisDocProvider genesisDocProvider, genesisDocProvider genesisDocProvider,
dbProvider cfg.DBProvider,
dbProvider config.DBProvider,
logger log.Logger) (service.Service, error) { logger log.Logger) (service.Service, error) {
blockStore, stateDB, err := initDBs(config, dbProvider)
blockStore, stateDB, err := initDBs(cfg, dbProvider)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -161,31 +163,31 @@ func makeNode(config *cfg.Config,
return nil, err return nil, err
} }
indexerService, eventSinks, err := createAndStartIndexerService(config, dbProvider, eventBus, logger, genDoc.ChainID)
indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// If an address is provided, listen on the socket for a connection from an // If an address is provided, listen on the socket for a connection from an
// external signing process. // external signing process.
if config.PrivValidator.ListenAddr != "" {
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
if cfg.PrivValidator.ListenAddr != "" {
protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr)
// FIXME: we should start services inside OnStart // FIXME: we should start services inside OnStart
switch protocol { switch protocol {
case "grpc": case "grpc":
privValidator, err = createAndStartPrivValidatorGRPCClient(config, genDoc.ChainID, logger)
privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("error with private validator grpc client: %w", err) return nil, fmt.Errorf("error with private validator grpc client: %w", err)
} }
default: default:
privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidator.ListenAddr, genDoc.ChainID, logger)
privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("error with private validator socket client: %w", err) return nil, fmt.Errorf("error with private validator socket client: %w", err)
} }
} }
} }
var pubKey crypto.PubKey var pubKey crypto.PubKey
if config.Mode == cfg.ModeValidator {
if cfg.Mode == config.ModeValidator {
pubKey, err = privValidator.GetPubKey(context.TODO()) pubKey, err = privValidator.GetPubKey(context.TODO())
if err != nil { if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err) return nil, fmt.Errorf("can't get pubkey: %w", err)
@ -196,7 +198,7 @@ func makeNode(config *cfg.Config,
} }
// Determine whether we should attempt state sync. // Determine whether we should attempt state sync.
stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
stateSync := cfg.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
if stateSync && state.LastBlockHeight > 0 { if stateSync && state.LastBlockHeight > 0 {
logger.Info("Found local state with non-zero height, skipping state sync") logger.Info("Found local state with non-zero height, skipping state sync")
stateSync = false stateSync = false
@ -221,43 +223,43 @@ func makeNode(config *cfg.Config,
// Determine whether we should do block sync. This must happen after the handshake, since the // Determine whether we should do block sync. This must happen after the handshake, since the
// app may modify the validator set, specifying ourself as the only validator. // app may modify the validator set, specifying ourself as the only validator.
blockSync := config.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey)
blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey)
logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode)
logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode)
// TODO: Fetch and provide real options and do proper p2p bootstrapping. // TODO: Fetch and provide real options and do proper p2p bootstrapping.
// TODO: Use a persistent peer database. // TODO: Use a persistent peer database.
nodeInfo, err := makeNodeInfo(config, nodeKey, eventSinks, genDoc, state)
nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state)
if err != nil { if err != nil {
return nil, err return nil, err
} }
p2pLogger := logger.With("module", "p2p") p2pLogger := logger.With("module", "p2p")
transport := createTransport(p2pLogger, config)
transport := createTransport(p2pLogger, cfg)
peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID)
peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create peer manager: %w", err) return nil, fmt.Errorf("failed to create peer manager: %w", err)
} }
nodeMetrics := nodeMetrics :=
defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID)
defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID)
router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey,
peerManager, transport, getRouterConfig(config, proxyApp))
peerManager, transport, getRouterConfig(cfg, proxyApp))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create router: %w", err) return nil, fmt.Errorf("failed to create router: %w", err)
} }
mpReactorShim, mpReactor, mp, err := createMempoolReactor( mpReactorShim, mpReactor, mp, err := createMempoolReactor(
config, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
) )
if err != nil { if err != nil {
return nil, err return nil, err
} }
evReactorShim, evReactor, evPool, err := createEvidenceReactor( evReactorShim, evReactor, evPool, err := createEvidenceReactor(
config, dbProvider, stateDB, blockStore, peerManager, router, logger,
cfg, dbProvider, stateDB, blockStore, peerManager, router, logger,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -275,16 +277,16 @@ func makeNode(config *cfg.Config,
) )
csReactorShim, csReactor, csState := createConsensusReactor( csReactorShim, csReactor, csState := createConsensusReactor(
config, state, blockExec, blockStore, mp, evPool,
privValidator, nodeMetrics.cs, stateSync || blockSync, eventBus,
cfg, state, blockExec, blockStore, mp, evPool,
privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus,
peerManager, router, consensusLogger, peerManager, router, consensusLogger,
) )
// Create the blockchain reactor. Note, we do not start block sync if we're // Create the blockchain reactor. Note, we do not start block sync if we're
// doing a state sync first. // doing a state sync first.
bcReactorShim, bcReactor, err := createBlockchainReactor( bcReactorShim, bcReactor, err := createBlockchainReactor(
logger, config, state, blockExec, blockStore, csReactor,
peerManager, router, blockSync && !stateSync, nodeMetrics.cs,
logger, cfg, state, blockExec, blockStore, csReactor,
peerManager, router, blockSync && !stateSync, nodeMetrics.consensus,
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("could not create blockchain reactor: %w", err) return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
@ -301,9 +303,9 @@ func makeNode(config *cfg.Config,
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
// FIXME We need to update metrics here, since other reactors don't have access to them. // FIXME We need to update metrics here, since other reactors don't have access to them.
if stateSync { if stateSync {
nodeMetrics.cs.StateSyncing.Set(1)
nodeMetrics.consensus.StateSyncing.Set(1)
} else if blockSync { } else if blockSync {
nodeMetrics.cs.BlockSyncing.Set(1)
nodeMetrics.consensus.BlockSyncing.Set(1)
} }
// Set up state sync reactor, and schedule a sync if requested. // Set up state sync reactor, and schedule a sync if requested.
@ -320,7 +322,7 @@ func makeNode(config *cfg.Config,
stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims)
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(stateSyncReactorShim) channels = getChannelsFromShim(stateSyncReactorShim)
peerUpdates = stateSyncReactorShim.PeerUpdates peerUpdates = stateSyncReactorShim.PeerUpdates
} else { } else {
@ -331,7 +333,7 @@ func makeNode(config *cfg.Config,
stateSyncReactor = statesync.NewReactor( stateSyncReactor = statesync.NewReactor(
genDoc.ChainID, genDoc.ChainID,
genDoc.InitialHeight, genDoc.InitialHeight,
*config.StateSync,
*cfg.StateSync,
stateSyncReactorShim.Logger, stateSyncReactorShim.Logger,
proxyApp.Snapshot(), proxyApp.Snapshot(),
proxyApp.Query(), proxyApp.Query(),
@ -342,7 +344,7 @@ func makeNode(config *cfg.Config,
peerUpdates, peerUpdates,
stateStore, stateStore,
blockStore, blockStore,
config.StateSync.TempDir,
cfg.StateSync.TempDir,
nodeMetrics.statesync, nodeMetrics.statesync,
) )
@ -378,46 +380,46 @@ func makeNode(config *cfg.Config,
pexCh := pex.ChannelDescriptor() pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
// setup Transport and Switch // setup Transport and Switch
sw = createSwitch( sw = createSwitch(
config, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch,
cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch,
stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger,
) )
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
if err != nil { if err != nil {
return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err)
} }
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
if err != nil { if err != nil {
return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
} }
addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not create addrbook: %w", err) return nil, fmt.Errorf("could not create addrbook: %w", err)
} }
pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
} else { } else {
addrBook = nil addrBook = nil
pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
if config.RPC.PprofListenAddress != "" {
if cfg.RPC.PprofListenAddress != "" {
go func() { go func() {
logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
}() }()
} }
node := &nodeImpl{ node := &nodeImpl{
config: config,
config: cfg,
genesisDoc: genDoc, genesisDoc: genDoc,
privValidator: privValidator, privValidator: privValidator,
@ -452,7 +454,7 @@ func makeNode(config *cfg.Config,
ConsensusState: csState, ConsensusState: csState,
ConsensusReactor: csReactor, ConsensusReactor: csReactor,
BlockSyncReactor: bcReactor.(cs.BlockSyncReactor),
BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor),
P2PPeers: sw, P2PPeers: sw,
PeerManager: peerManager, PeerManager: peerManager,
@ -462,7 +464,7 @@ func makeNode(config *cfg.Config,
EventBus: eventBus, EventBus: eventBus,
Mempool: mp, Mempool: mp,
Logger: logger.With("module", "rpc"), Logger: logger.With("module", "rpc"),
Config: *config.RPC,
Config: *cfg.RPC,
}, },
} }
@ -485,8 +487,8 @@ func makeNode(config *cfg.Config,
} }
// makeSeedNode returns a new seed node, containing only p2p, pex reactor // makeSeedNode returns a new seed node, containing only p2p, pex reactor
func makeSeedNode(config *cfg.Config,
dbProvider cfg.DBProvider,
func makeSeedNode(cfg *config.Config,
dbProvider config.DBProvider,
nodeKey types.NodeKey, nodeKey types.NodeKey,
genesisDocProvider genesisDocProvider, genesisDocProvider genesisDocProvider,
logger log.Logger, logger log.Logger,
@ -502,23 +504,23 @@ func makeSeedNode(config *cfg.Config,
return nil, err return nil, err
} }
nodeInfo, err := makeSeedNodeInfo(config, nodeKey, genDoc, state)
nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Setup Transport and Switch. // Setup Transport and Switch.
p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
p2pLogger := logger.With("module", "p2p") p2pLogger := logger.With("module", "p2p")
transport := createTransport(p2pLogger, config)
transport := createTransport(p2pLogger, cfg)
peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID)
peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create peer manager: %w", err) return nil, fmt.Errorf("failed to create peer manager: %w", err)
} }
router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey,
peerManager, transport, getRouterConfig(config, nil))
peerManager, transport, getRouterConfig(cfg, nil))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create router: %w", err) return nil, fmt.Errorf("failed to create router: %w", err)
} }
@ -536,44 +538,44 @@ func makeSeedNode(config *cfg.Config,
pexCh := pex.ChannelDescriptor() pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
sw = createSwitch( sw = createSwitch(
config, transport, p2pMetrics, nil, nil,
cfg, transport, p2pMetrics, nil, nil,
nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger,
) )
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
if err != nil { if err != nil {
return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
} }
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
if err != nil { if err != nil {
return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
} }
addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not create addrbook: %w", err) return nil, fmt.Errorf("could not create addrbook: %w", err)
} }
pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
} else { } else {
pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
if config.RPC.PprofListenAddress != "" {
if cfg.RPC.PprofListenAddress != "" {
go func() { go func() {
logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
}() }()
} }
node := &nodeImpl{ node := &nodeImpl{
config: config,
config: cfg,
genesisDoc: genDoc, genesisDoc: genDoc,
transport: transport, transport: transport,
@ -602,7 +604,7 @@ func (n *nodeImpl) OnStart() error {
// Start the RPC server before the P2P server // Start the RPC server before the P2P server
// so we can eg. receive txs for the first block // so we can eg. receive txs for the first block
if n.config.RPC.ListenAddress != "" && n.config.Mode != cfg.ModeSeed {
if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed {
listeners, err := n.startRPC() listeners, err := n.startRPC()
if err != nil { if err != nil {
return err return err
@ -637,8 +639,8 @@ func (n *nodeImpl) OnStart() error {
return err return err
} }
if n.config.Mode != cfg.ModeSeed {
if n.config.BlockSync.Version == cfg.BlockSyncV0 {
if n.config.Mode != config.ModeSeed {
if n.config.BlockSync.Version == config.BlockSyncV0 {
if err := n.bcReactor.Start(); err != nil { if err := n.bcReactor.Start(); err != nil {
return err return err
} }
@ -679,7 +681,7 @@ func (n *nodeImpl) OnStart() error {
// TODO: We shouldn't run state sync if we already have state that has a // TODO: We shouldn't run state sync if we already have state that has a
// LastBlockHeight that is not InitialHeight // LastBlockHeight that is not InitialHeight
if n.stateSync { if n.stateSync {
bcR, ok := n.bcReactor.(cs.BlockSyncReactor)
bcR, ok := n.bcReactor.(consensus.BlockSyncReactor)
if !ok { if !ok {
return fmt.Errorf("this blockchain reactor does not support switching from state sync") return fmt.Errorf("this blockchain reactor does not support switching from state sync")
} }
@ -758,9 +760,9 @@ func (n *nodeImpl) OnStop() {
n.Logger.Error("Error closing indexerService", "err", err) n.Logger.Error("Error closing indexerService", "err", err)
} }
if n.config.Mode != cfg.ModeSeed {
if n.config.Mode != config.ModeSeed {
// now stop the reactors // now stop the reactors
if n.config.BlockSync.Version == cfg.BlockSyncV0 {
if n.config.BlockSync.Version == config.BlockSyncV0 {
// Stop the real blockchain reactor separately since the switch uses the shim. // Stop the real blockchain reactor separately since the switch uses the shim.
if err := n.bcReactor.Stop(); err != nil { if err := n.bcReactor.Stop(); err != nil {
n.Logger.Error("failed to stop the blockchain reactor", "err", err) n.Logger.Error("failed to stop the blockchain reactor", "err", err)
@ -831,7 +833,7 @@ func (n *nodeImpl) OnStop() {
} }
func (n *nodeImpl) startRPC() ([]net.Listener, error) { func (n *nodeImpl) startRPC() ([]net.Listener, error) {
if n.config.Mode == cfg.ModeValidator {
if n.config.Mode == config.ModeValidator {
pubKey, err := n.privValidator.GetPubKey(context.TODO()) pubKey, err := n.privValidator.GetPubKey(context.TODO())
if pubKey == nil || err != nil { if pubKey == nil || err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err) return nil, fmt.Errorf("can't get pubkey: %w", err)
@ -849,15 +851,15 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
n.rpcEnv.AddUnsafe(routes) n.rpcEnv.AddUnsafe(routes)
} }
config := rpcserver.DefaultConfig()
config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
cfg := rpcserver.DefaultConfig()
cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes
cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections
// If necessary adjust global WriteTimeout to ensure it's greater than // If necessary adjust global WriteTimeout to ensure it's greater than
// TimeoutBroadcastTxCommit. // TimeoutBroadcastTxCommit.
// See https://github.com/tendermint/tendermint/issues/3435 // See https://github.com/tendermint/tendermint/issues/3435
if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
} }
// we may expose the rpc over both a unix and tcp socket // we may expose the rpc over both a unix and tcp socket
@ -873,14 +875,14 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
} }
}), }),
rpcserver.ReadLimit(config.MaxBodyBytes),
rpcserver.ReadLimit(cfg.MaxBodyBytes),
) )
wm.SetLogger(wmLogger) wm.SetLogger(wmLogger)
mux.HandleFunc("/websocket", wm.WebsocketHandler) mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger)
listener, err := rpcserver.Listen( listener, err := rpcserver.Listen(
listenAddr, listenAddr,
config.MaxOpenConnections,
cfg.MaxOpenConnections,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -903,7 +905,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
n.config.RPC.CertFile(), n.config.RPC.CertFile(),
n.config.RPC.KeyFile(), n.config.RPC.KeyFile(),
rpcLogger, rpcLogger,
config,
cfg,
); err != nil { ); err != nil {
n.Logger.Error("Error serving server with TLS", "err", err) n.Logger.Error("Error serving server with TLS", "err", err)
} }
@ -914,7 +916,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
listener, listener,
rootHandler, rootHandler,
rpcLogger, rpcLogger,
config,
cfg,
); err != nil { ); err != nil {
n.Logger.Error("Error serving server", "err", err) n.Logger.Error("Error serving server", "err", err)
} }
@ -927,18 +929,18 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
// we expose a simplified api over grpc for convenience to app devs // we expose a simplified api over grpc for convenience to app devs
grpcListenAddr := n.config.RPC.GRPCListenAddress grpcListenAddr := n.config.RPC.GRPCListenAddress
if grpcListenAddr != "" { if grpcListenAddr != "" {
config := rpcserver.DefaultConfig()
config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
cfg := rpcserver.DefaultConfig()
cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes
cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
cfg.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
// If necessary adjust global WriteTimeout to ensure it's greater than // If necessary adjust global WriteTimeout to ensure it's greater than
// TimeoutBroadcastTxCommit. // TimeoutBroadcastTxCommit.
// See https://github.com/tendermint/tendermint/issues/3435 // See https://github.com/tendermint/tendermint/issues/3435
if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
} }
listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections)
listener, err := rpcserver.Listen(grpcListenAddr, cfg.MaxOpenConnections)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -977,7 +979,7 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server {
} }
// ConsensusReactor returns the Node's ConsensusReactor. // ConsensusReactor returns the Node's ConsensusReactor.
func (n *nodeImpl) ConsensusReactor() *cs.Reactor {
func (n *nodeImpl) ConsensusReactor() *consensus.Reactor {
return n.consensusReactor return n.consensusReactor
} }
@ -1031,14 +1033,14 @@ type genesisDocProvider func() (*types.GenesisDoc, error)
// defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads // defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem. // the GenesisDoc from the config.GenesisFile() on the filesystem.
func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider {
func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider {
return func() (*types.GenesisDoc, error) { return func() (*types.GenesisDoc, error) {
return types.GenesisDocFromFile(config.GenesisFile())
return types.GenesisDocFromFile(cfg.GenesisFile())
} }
} }
type nodeMetrics struct { type nodeMetrics struct {
cs *cs.Metrics
consensus *consensus.Metrics
p2p *p2p.Metrics p2p *p2p.Metrics
mempool *mempool.Metrics mempool *mempool.Metrics
state *sm.Metrics state *sm.Metrics
@ -1050,19 +1052,19 @@ type metricsProvider func(chainID string) *nodeMetrics
// defaultMetricsProvider returns Metrics build using Prometheus client library // defaultMetricsProvider returns Metrics build using Prometheus client library
// if Prometheus is enabled. Otherwise, it returns no-op Metrics. // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider {
func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider {
return func(chainID string) *nodeMetrics { return func(chainID string) *nodeMetrics {
if config.Prometheus {
if cfg.Prometheus {
return &nodeMetrics{ return &nodeMetrics{
cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID),
sm.PrometheusMetrics(config.Namespace, "chain_id", chainID),
statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID),
consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
} }
} }
return &nodeMetrics{ return &nodeMetrics{
cs.NopMetrics(),
consensus.NopMetrics(),
p2p.NopMetrics(), p2p.NopMetrics(),
mempool.NopMetrics(), mempool.NopMetrics(),
sm.NopMetrics(), sm.NopMetrics(),
@ -1130,15 +1132,15 @@ func createAndStartPrivValidatorSocketClient(
} }
func createAndStartPrivValidatorGRPCClient( func createAndStartPrivValidatorGRPCClient(
config *cfg.Config,
cfg *config.Config,
chainID string, chainID string,
logger log.Logger, logger log.Logger,
) (types.PrivValidator, error) { ) (types.PrivValidator, error) {
pvsc, err := tmgrpc.DialRemoteSigner( pvsc, err := tmgrpc.DialRemoteSigner(
config.PrivValidator,
cfg.PrivValidator,
chainID, chainID,
logger, logger,
config.Instrumentation.Prometheus,
cfg.Instrumentation.Prometheus,
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to start private validator: %w", err) return nil, fmt.Errorf("failed to start private validator: %w", err)
@@ -1153,7 +1155,7 @@ func createAndStartPrivValidatorGRPCClient(
    return pvsc, nil
}

-func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOptions {
+func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions {
    opts := p2p.RouterOptions{
        QueueType: conf.P2P.QueueType,
    }
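Most of the remaining hunks in this file swap the names config and cfg: the parameter previously named config becomes cfg, which frees config to refer to the package itself. The standalone sketch below shows the shadowing problem this rename avoids, using net/url as a stand-in for the config package (illustration only, not the repository's code).

package main

import (
	"fmt"
	"net/url"
)

// Before: a parameter named "url" would shadow the net/url package inside the
// function body, forcing an alias such as neturl onto the import. Renaming the
// local keeps the package usable under its natural name.
func parse(raw string) (*url.URL, error) {
	return url.Parse(raw)
}

func main() {
	u, err := parse("https://example.com/path")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host)
}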


+75 -76  node/node_test.go

@@ -13,12 +13,11 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"
    abciclient "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/example/kvstore"
-   cfg "github.com/tendermint/tendermint/config"
+   "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/crypto/tmhash"
@ -38,12 +37,12 @@ import (
) )
func TestNodeStartStop(t *testing.T) { func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
cfg := config.ResetTestRoot("node_node_test")
defer os.RemoveAll(config.RootDir)
defer os.RemoveAll(cfg.RootDir)
// create & start node // create & start node
ns, err := newDefaultNode(config, log.TestingLogger())
ns, err := newDefaultNode(cfg, log.TestingLogger())
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, ns.Start()) require.NoError(t, ns.Start())
@ -81,7 +80,7 @@ func TestNodeStartStop(t *testing.T) {
} }
} }
func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl {
func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
t.Helper() t.Helper()
ns, err := newDefaultNode(conf, logger) ns, err := newDefaultNode(conf, logger)
require.NoError(t, err) require.NoError(t, err)
@ -92,12 +91,12 @@ func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl {
} }
func TestNodeDelayedStart(t *testing.T) { func TestNodeDelayedStart(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_start_test")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_delayed_start_test")
defer os.RemoveAll(cfg.RootDir)
now := tmtime.Now() now := tmtime.Now()
// create & start node // create & start node
n := getTestNode(t, config, log.TestingLogger())
n := getTestNode(t, cfg, log.TestingLogger())
n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)
require.NoError(t, n.Start()) require.NoError(t, n.Start())
@ -108,11 +107,11 @@ func TestNodeDelayedStart(t *testing.T) {
} }
func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetAppVersion(t *testing.T) {
config := cfg.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(cfg.RootDir)
// create node // create node
n := getTestNode(t, config, log.TestingLogger())
n := getTestNode(t, cfg, log.TestingLogger())
// default config uses the kvstore app // default config uses the kvstore app
var appVersion uint64 = kvstore.ProtocolVersion var appVersion uint64 = kvstore.ProtocolVersion
@ -129,9 +128,9 @@ func TestNodeSetAppVersion(t *testing.T) {
func TestNodeSetPrivValTCP(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) {
addr := "tcp://" + testFreeAddr(t) addr := "tcp://" + testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.PrivValidator.ListenAddr = addr
cfg := config.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(cfg.RootDir)
cfg.PrivValidator.ListenAddr = addr
dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
dialerEndpoint := privval.NewSignerDialerEndpoint( dialerEndpoint := privval.NewSignerDialerEndpoint(
@ -142,7 +141,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
signerServer := privval.NewSignerServer( signerServer := privval.NewSignerServer(
dialerEndpoint, dialerEndpoint,
config.ChainID(),
cfg.ChainID(),
types.NewMockPV(), types.NewMockPV(),
) )
@ -154,7 +153,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
}() }()
defer signerServer.Stop() //nolint:errcheck // ignore for tests defer signerServer.Stop() //nolint:errcheck // ignore for tests
n := getTestNode(t, config, log.TestingLogger())
n := getTestNode(t, cfg, log.TestingLogger())
assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
} }
@ -162,11 +161,11 @@ func TestNodeSetPrivValTCP(t *testing.T) {
func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
addrNoPrefix := testFreeAddr(t) addrNoPrefix := testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.PrivValidator.ListenAddr = addrNoPrefix
cfg := config.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(cfg.RootDir)
cfg.PrivValidator.ListenAddr = addrNoPrefix
_, err := newDefaultNode(config, log.TestingLogger())
_, err := newDefaultNode(cfg, log.TestingLogger())
assert.Error(t, err) assert.Error(t, err)
} }
@ -174,9 +173,9 @@ func TestNodeSetPrivValIPC(t *testing.T) {
tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock" tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock"
defer os.Remove(tmpfile) // clean up defer os.Remove(tmpfile) // clean up
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.PrivValidator.ListenAddr = "unix://" + tmpfile
cfg := config.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(cfg.RootDir)
cfg.PrivValidator.ListenAddr = "unix://" + tmpfile
dialer := privval.DialUnixFn(tmpfile) dialer := privval.DialUnixFn(tmpfile)
dialerEndpoint := privval.NewSignerDialerEndpoint( dialerEndpoint := privval.NewSignerDialerEndpoint(
@ -187,7 +186,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
pvsc := privval.NewSignerServer( pvsc := privval.NewSignerServer(
dialerEndpoint, dialerEndpoint,
config.ChainID(),
cfg.ChainID(),
types.NewMockPV(), types.NewMockPV(),
) )
@ -196,7 +195,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
}() }()
defer pvsc.Stop() //nolint:errcheck // ignore for tests defer pvsc.Stop() //nolint:errcheck // ignore for tests
n := getTestNode(t, config, log.TestingLogger())
n := getTestNode(t, cfg, log.TestingLogger())
assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
} }
@ -212,8 +211,8 @@ func testFreeAddr(t *testing.T) string {
// create a proposal block using real and full // create a proposal block using real and full
// mempool and evidence pool and validate it. // mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) { func TestCreateProposalBlock(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(cfg.RootDir)
cc := abciclient.NewLocalCreator(kvstore.NewApplication()) cc := abciclient.NewLocalCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc) proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start() err := proxyApp.Start()
@ -233,7 +232,7 @@ func TestCreateProposalBlock(t *testing.T) {
proposerAddr, _ := state.Validators.GetByIndex(0) proposerAddr, _ := state.Validators.GetByIndex(0)
mp := mempoolv0.NewCListMempool( mp := mempoolv0.NewCListMempool(
config.Mempool,
cfg.Mempool,
proxyApp.Mempool(), proxyApp.Mempool(),
state.LastBlockHeight, state.LastBlockHeight,
mempoolv0.WithMetrics(mempool.NopMetrics()), mempoolv0.WithMetrics(mempool.NopMetrics()),
@ -304,8 +303,8 @@ func TestCreateProposalBlock(t *testing.T) {
} }
func TestMaxTxsProposalBlockSize(t *testing.T) { func TestMaxTxsProposalBlockSize(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(cfg.RootDir)
cc := abciclient.NewLocalCreator(kvstore.NewApplication()) cc := abciclient.NewLocalCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc) proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start() err := proxyApp.Start()
@ -325,7 +324,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
// Make Mempool // Make Mempool
mp := mempoolv0.NewCListMempool( mp := mempoolv0.NewCListMempool(
config.Mempool,
cfg.Mempool,
proxyApp.Mempool(), proxyApp.Mempool(),
state.LastBlockHeight, state.LastBlockHeight,
mempoolv0.WithMetrics(mempool.NopMetrics()), mempoolv0.WithMetrics(mempool.NopMetrics()),
@ -366,8 +365,8 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
} }
func TestMaxProposalBlockSize(t *testing.T) { func TestMaxProposalBlockSize(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(cfg.RootDir)
cc := abciclient.NewLocalCreator(kvstore.NewApplication()) cc := abciclient.NewLocalCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc) proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start() err := proxyApp.Start()
@ -385,7 +384,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
// Make Mempool // Make Mempool
mp := mempoolv0.NewCListMempool( mp := mempoolv0.NewCListMempool(
config.Mempool,
cfg.Mempool,
proxyApp.Mempool(), proxyApp.Mempool(),
state.LastBlockHeight, state.LastBlockHeight,
mempoolv0.WithMetrics(mempool.NopMetrics()), mempoolv0.WithMetrics(mempool.NopMetrics()),
@ -481,17 +480,17 @@ func TestMaxProposalBlockSize(t *testing.T) {
} }
func TestNodeNewSeedNode(t *testing.T) { func TestNodeNewSeedNode(t *testing.T) {
config := cfg.ResetTestRoot("node_new_node_custom_reactors_test")
config.Mode = cfg.ModeSeed
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_new_node_custom_reactors_test")
cfg.Mode = config.ModeSeed
defer os.RemoveAll(cfg.RootDir)
nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile())
nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
require.NoError(t, err) require.NoError(t, err)
ns, err := makeSeedNode(config,
cfg.DefaultDBProvider,
ns, err := makeSeedNode(cfg,
config.DefaultDBProvider,
nodeKey, nodeKey,
defaultGenesisDocProviderFunc(config),
defaultGenesisDocProviderFunc(cfg),
log.TestingLogger(), log.TestingLogger(),
) )
require.NoError(t, err) require.NoError(t, err)
@ -505,68 +504,68 @@ func TestNodeNewSeedNode(t *testing.T) {
} }
func TestNodeSetEventSink(t *testing.T) { func TestNodeSetEventSink(t *testing.T) {
config := cfg.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(config.RootDir)
cfg := config.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(cfg.RootDir)
logger := log.TestingLogger() logger := log.TestingLogger()
setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink {
setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
eventBus, err := createAndStartEventBus(logger) eventBus, err := createAndStartEventBus(logger)
require.NoError(t, err) require.NoError(t, err)
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err) require.NoError(t, err)
indexService, eventSinks, err := createAndStartIndexerService(config,
cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
indexService, eventSinks, err := createAndStartIndexerService(cfg,
config.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
return eventSinks return eventSinks
} }
eventSinks := setupTest(t, config)
eventSinks := setupTest(t, cfg)
assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, 1, len(eventSinks))
assert.Equal(t, indexer.KV, eventSinks[0].Type()) assert.Equal(t, indexer.KV, eventSinks[0].Type())
config.TxIndex.Indexer = []string{"null"}
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{"null"}
eventSinks = setupTest(t, cfg)
assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, 1, len(eventSinks))
assert.Equal(t, indexer.NULL, eventSinks[0].Type()) assert.Equal(t, indexer.NULL, eventSinks[0].Type())
config.TxIndex.Indexer = []string{"null", "kv"}
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{"null", "kv"}
eventSinks = setupTest(t, cfg)
assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, 1, len(eventSinks))
assert.Equal(t, indexer.NULL, eventSinks[0].Type()) assert.Equal(t, indexer.NULL, eventSinks[0].Type())
config.TxIndex.Indexer = []string{"kvv"}
ns, err := newDefaultNode(config, logger)
cfg.TxIndex.Indexer = []string{"kvv"}
ns, err := newDefaultNode(cfg, logger)
assert.Nil(t, ns) assert.Nil(t, ns)
assert.Equal(t, errors.New("unsupported event sink type"), err) assert.Equal(t, errors.New("unsupported event sink type"), err)
config.TxIndex.Indexer = []string{}
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{}
eventSinks = setupTest(t, cfg)
assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, 1, len(eventSinks))
assert.Equal(t, indexer.NULL, eventSinks[0].Type()) assert.Equal(t, indexer.NULL, eventSinks[0].Type())
config.TxIndex.Indexer = []string{"psql"}
ns, err = newDefaultNode(config, logger)
cfg.TxIndex.Indexer = []string{"psql"}
ns, err = newDefaultNode(cfg, logger)
assert.Nil(t, ns) assert.Nil(t, ns)
assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
var psqlConn = "test" var psqlConn = "test"
config.TxIndex.Indexer = []string{"psql"}
config.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{"psql"}
cfg.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, cfg)
assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, 1, len(eventSinks))
assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
config.TxIndex.Indexer = []string{"psql", "kv"}
config.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{"psql", "kv"}
cfg.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, cfg)
assert.Equal(t, 2, len(eventSinks)) assert.Equal(t, 2, len(eventSinks))
// we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks. // we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks.
@ -577,9 +576,9 @@ func TestNodeSetEventSink(t *testing.T) {
assert.Equal(t, indexer.KV, eventSinks[1].Type()) assert.Equal(t, indexer.KV, eventSinks[1].Type())
} }
config.TxIndex.Indexer = []string{"kv", "psql"}
config.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, config)
cfg.TxIndex.Indexer = []string{"kv", "psql"}
cfg.TxIndex.PsqlConn = psqlConn
eventSinks = setupTest(t, cfg)
assert.Equal(t, 2, len(eventSinks)) assert.Equal(t, 2, len(eventSinks))
if eventSinks[0].Type() == indexer.KV { if eventSinks[0].Type() == indexer.KV {
@ -590,15 +589,15 @@ func TestNodeSetEventSink(t *testing.T) {
} }
var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
config.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
config.TxIndex.PsqlConn = psqlConn
_, err = newDefaultNode(config, logger)
cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
cfg.TxIndex.PsqlConn = psqlConn
_, err = newDefaultNode(cfg, logger)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, e, err) assert.Equal(t, e, err)
config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
config.TxIndex.PsqlConn = psqlConn
_, err = newDefaultNode(config, logger)
cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
cfg.TxIndex.PsqlConn = psqlConn
_, err = newDefaultNode(cfg, logger)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, e, err) assert.Equal(t, e, err)
} }
@@ -648,13 +647,13 @@ func loadStatefromGenesis(t *testing.T) sm.State {
    stateDB := dbm.NewMemDB()
    stateStore := sm.NewStore(stateDB)
-   config := cfg.ResetTestRoot("load_state_from_genesis")
+   cfg := config.ResetTestRoot("load_state_from_genesis")
    loadedState, err := stateStore.Load()
    require.NoError(t, err)
    require.True(t, loadedState.IsEmpty())
-   genDoc, _ := factory.RandGenesisDoc(config, 0, false, 10)
+   genDoc, _ := factory.RandGenesisDoc(cfg, 0, false, 10)
    state, err := loadStateFromDBOrGenesisDocProvider(
        stateStore,
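The test changes follow the same rename, so package-level helpers such as config.ResetTestRoot and constants such as config.ModeSeed stay reachable through the package name while the local value is called cfg. A tiny, self-contained sketch of the create-root/defer-cleanup pattern these tests use; resetTestRoot and testConfig below are hypothetical stand-ins, not the repository's helpers.

package main

import (
	"fmt"
	"os"
)

type testConfig struct{ RootDir string }

// resetTestRoot is a hypothetical stand-in for a helper that materializes a
// throwaway configuration directory for a single test.
func resetTestRoot(name string) *testConfig {
	dir, err := os.MkdirTemp("", name)
	if err != nil {
		panic(err)
	}
	return &testConfig{RootDir: dir}
}

func main() {
	cfg := resetTestRoot("node_node_test")
	defer os.RemoveAll(cfg.RootDir) // remove the test root when done
	fmt.Println("test root:", cfg.RootDir)
}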


+109 -108  node/setup.go

@@ -7,18 +7,17 @@ import (
    "fmt"
    "math"
    "net"
-   _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    "time"
    dbm "github.com/tendermint/tm-db"
    abciclient "github.com/tendermint/tendermint/abci/client"
    abci "github.com/tendermint/tendermint/abci/types"
-   cfg "github.com/tendermint/tendermint/config"
+   "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/crypto"
    bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
    bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2"
-   cs "github.com/tendermint/tendermint/internal/consensus"
+   "github.com/tendermint/tendermint/internal/consensus"
    "github.com/tendermint/tendermint/internal/evidence"
    "github.com/tendermint/tendermint/internal/mempool"
    mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"

@@ -37,17 +36,19 @@ import (
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/version"
+   _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
)

-func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
+func initDBs(cfg *config.Config, dbProvider config.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { //nolint:lll
    var blockStoreDB dbm.DB
-   blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
+   blockStoreDB, err = dbProvider(&config.DBContext{ID: "blockstore", Config: cfg})
    if err != nil {
        return
    }
    blockStore = store.NewBlockStore(blockStoreDB)

-   stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
+   stateDB, err = dbProvider(&config.DBContext{ID: "state", Config: cfg})
    return
}
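initDBs takes the database constructor as a config.DBProvider value rather than opening databases directly, which is what lets tests inject an in-memory store. Below is a standalone sketch of that provider indirection built entirely from stand-in types; none of these are the tm-db or config APIs.

package main

import "fmt"

// Stand-in types: kv plays the role of a database handle, dbContext the role
// of config.DBContext, and dbProvider the role of config.DBProvider.
type kv map[string][]byte

type dbContext struct{ ID string }

type dbProvider func(*dbContext) (kv, error)

// memDBProvider is the kind of provider a test would supply.
func memDBProvider(ctx *dbContext) (kv, error) {
	fmt.Println("opening in-memory db:", ctx.ID)
	return kv{}, nil
}

// initDBs mirrors the shape above: open the block store, then the state DB,
// using whatever provider the caller passes in.
func initDBs(provider dbProvider) (blockStore, stateDB kv, err error) {
	blockStore, err = provider(&dbContext{ID: "blockstore"})
	if err != nil {
		return nil, nil, err
	}
	stateDB, err = provider(&dbContext{ID: "state"})
	return blockStore, stateDB, err
}

func main() {
	bs, st, err := initDBs(memDBProvider)
	fmt.Println(len(bs), len(st), err)
}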
@ -70,13 +71,13 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
} }
func createAndStartIndexerService( func createAndStartIndexerService(
config *cfg.Config,
dbProvider cfg.DBProvider,
cfg *config.Config,
dbProvider config.DBProvider,
eventBus *types.EventBus, eventBus *types.EventBus,
logger log.Logger, logger log.Logger,
chainID string, chainID string,
) (*indexer.Service, []indexer.EventSink, error) { ) (*indexer.Service, []indexer.EventSink, error) {
eventSinks, err := sink.EventSinksFromConfig(config, dbProvider, chainID)
eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -100,7 +101,7 @@ func doHandshake(
proxyApp proxy.AppConns, proxyApp proxy.AppConns,
consensusLogger log.Logger) error { consensusLogger log.Logger) error {
handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc)
handshaker.SetLogger(consensusLogger) handshaker.SetLogger(consensusLogger)
handshaker.SetEventBus(eventBus) handshaker.SetEventBus(eventBus)
if err := handshaker.Handshake(proxyApp); err != nil { if err := handshaker.Handshake(proxyApp); err != nil {
@ -126,9 +127,9 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL
) )
} }
switch { switch {
case mode == cfg.ModeFull:
case mode == config.ModeFull:
consensusLogger.Info("This node is a fullnode") consensusLogger.Info("This node is a fullnode")
case mode == cfg.ModeValidator:
case mode == config.ModeValidator:
addr := pubKey.Address() addr := pubKey.Address()
// Log whether this node is a validator or an observer // Log whether this node is a validator or an observer
if state.Validators.HasAddress(addr) { if state.Validators.HasAddress(addr) {
@ -149,7 +150,7 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
} }
func createMempoolReactor( func createMempoolReactor(
config *cfg.Config,
cfg *config.Config,
proxyApp proxy.AppConns, proxyApp proxy.AppConns,
state sm.State, state sm.State,
memplMetrics *mempool.Metrics, memplMetrics *mempool.Metrics,
@ -158,8 +159,8 @@ func createMempoolReactor(
logger log.Logger, logger log.Logger,
) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { ) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) {
logger = logger.With("module", "mempool", "version", config.Mempool.Version)
channelShims := mempoolv0.GetChannelShims(config.Mempool)
logger = logger.With("module", "mempool", "version", cfg.Mempool.Version)
channelShims := mempoolv0.GetChannelShims(cfg.Mempool)
reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)
var ( var (
@ -167,7 +168,7 @@ func createMempoolReactor(
peerUpdates *p2p.PeerUpdates peerUpdates *p2p.PeerUpdates
) )
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim) channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates peerUpdates = reactorShim.PeerUpdates
} else { } else {
@ -175,10 +176,10 @@ func createMempoolReactor(
peerUpdates = peerManager.Subscribe() peerUpdates = peerManager.Subscribe()
} }
switch config.Mempool.Version {
case cfg.MempoolV0:
switch cfg.Mempool.Version {
case config.MempoolV0:
mp := mempoolv0.NewCListMempool( mp := mempoolv0.NewCListMempool(
config.Mempool,
cfg.Mempool,
proxyApp.Mempool(), proxyApp.Mempool(),
state.LastBlockHeight, state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics), mempoolv0.WithMetrics(memplMetrics),
@ -190,23 +191,23 @@ func createMempoolReactor(
reactor := mempoolv0.NewReactor( reactor := mempoolv0.NewReactor(
logger, logger,
config.Mempool,
cfg.Mempool,
peerManager, peerManager,
mp, mp,
channels[mempool.MempoolChannel], channels[mempool.MempoolChannel],
peerUpdates, peerUpdates,
) )
if config.Consensus.WaitForTxs() {
if cfg.Consensus.WaitForTxs() {
mp.EnableTxsAvailable() mp.EnableTxsAvailable()
} }
return reactorShim, reactor, mp, nil return reactorShim, reactor, mp, nil
case cfg.MempoolV1:
case config.MempoolV1:
mp := mempoolv1.NewTxMempool( mp := mempoolv1.NewTxMempool(
logger, logger,
config.Mempool,
cfg.Mempool,
proxyApp.Mempool(), proxyApp.Mempool(),
state.LastBlockHeight, state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics), mempoolv1.WithMetrics(memplMetrics),
@ -216,34 +217,34 @@ func createMempoolReactor(
reactor := mempoolv1.NewReactor( reactor := mempoolv1.NewReactor(
logger, logger,
config.Mempool,
cfg.Mempool,
peerManager, peerManager,
mp, mp,
channels[mempool.MempoolChannel], channels[mempool.MempoolChannel],
peerUpdates, peerUpdates,
) )
if config.Consensus.WaitForTxs() {
if cfg.Consensus.WaitForTxs() {
mp.EnableTxsAvailable() mp.EnableTxsAvailable()
} }
return reactorShim, reactor, mp, nil return reactorShim, reactor, mp, nil
default: default:
return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version)
return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version)
} }
} }
func createEvidenceReactor( func createEvidenceReactor(
config *cfg.Config,
dbProvider cfg.DBProvider,
cfg *config.Config,
dbProvider config.DBProvider,
stateDB dbm.DB, stateDB dbm.DB,
blockStore *store.BlockStore, blockStore *store.BlockStore,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
router *p2p.Router, router *p2p.Router,
logger log.Logger, logger log.Logger,
) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) { ) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) {
evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg})
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@ -261,7 +262,7 @@ func createEvidenceReactor(
peerUpdates *p2p.PeerUpdates peerUpdates *p2p.PeerUpdates
) )
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim) channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates peerUpdates = reactorShim.PeerUpdates
} else { } else {
@ -281,21 +282,21 @@ func createEvidenceReactor(
func createBlockchainReactor( func createBlockchainReactor(
logger log.Logger, logger log.Logger,
config *cfg.Config,
cfg *config.Config,
state sm.State, state sm.State,
blockExec *sm.BlockExecutor, blockExec *sm.BlockExecutor,
blockStore *store.BlockStore, blockStore *store.BlockStore,
csReactor *cs.Reactor,
csReactor *consensus.Reactor,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
router *p2p.Router, router *p2p.Router,
blockSync bool, blockSync bool,
metrics *cs.Metrics,
metrics *consensus.Metrics,
) (*p2p.ReactorShim, service.Service, error) { ) (*p2p.ReactorShim, service.Service, error) {
logger = logger.With("module", "blockchain") logger = logger.With("module", "blockchain")
switch config.BlockSync.Version {
case cfg.BlockSyncV0:
switch cfg.BlockSync.Version {
case config.BlockSyncV0:
reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)
var ( var (
@ -303,7 +304,7 @@ func createBlockchainReactor(
peerUpdates *p2p.PeerUpdates peerUpdates *p2p.PeerUpdates
) )
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim) channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates peerUpdates = reactorShim.PeerUpdates
} else { } else {
@ -322,69 +323,69 @@ func createBlockchainReactor(
return reactorShim, reactor, nil return reactorShim, reactor, nil
case cfg.BlockSyncV2:
case config.BlockSyncV2:
return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0") return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0")
default: default:
return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version)
return nil, nil, fmt.Errorf("unknown block sync version %s", cfg.BlockSync.Version)
} }
} }
func createConsensusReactor( func createConsensusReactor(
config *cfg.Config,
cfg *config.Config,
state sm.State, state sm.State,
blockExec *sm.BlockExecutor, blockExec *sm.BlockExecutor,
blockStore sm.BlockStore, blockStore sm.BlockStore,
mp mempool.Mempool, mp mempool.Mempool,
evidencePool *evidence.Pool, evidencePool *evidence.Pool,
privValidator types.PrivValidator, privValidator types.PrivValidator,
csMetrics *cs.Metrics,
csMetrics *consensus.Metrics,
waitSync bool, waitSync bool,
eventBus *types.EventBus, eventBus *types.EventBus,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
router *p2p.Router, router *p2p.Router,
logger log.Logger, logger log.Logger,
) (*p2p.ReactorShim, *cs.Reactor, *cs.State) {
) (*p2p.ReactorShim, *consensus.Reactor, *consensus.State) {
consensusState := cs.NewState(
config.Consensus,
consensusState := consensus.NewState(
cfg.Consensus,
state.Copy(), state.Copy(),
blockExec, blockExec,
blockStore, blockStore,
mp, mp,
evidencePool, evidencePool,
cs.StateMetrics(csMetrics),
consensus.StateMetrics(csMetrics),
) )
consensusState.SetLogger(logger) consensusState.SetLogger(logger)
if privValidator != nil && config.Mode == cfg.ModeValidator {
if privValidator != nil && cfg.Mode == config.ModeValidator {
consensusState.SetPrivValidator(privValidator) consensusState.SetPrivValidator(privValidator)
} }
reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims)
reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", consensus.ChannelShims)
var ( var (
channels map[p2p.ChannelID]*p2p.Channel channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates peerUpdates *p2p.PeerUpdates
) )
if config.P2P.UseLegacy {
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim) channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates peerUpdates = reactorShim.PeerUpdates
} else { } else {
channels = makeChannelsFromShims(router, cs.ChannelShims)
channels = makeChannelsFromShims(router, consensus.ChannelShims)
peerUpdates = peerManager.Subscribe() peerUpdates = peerManager.Subscribe()
} }
reactor := cs.NewReactor(
reactor := consensus.NewReactor(
logger, logger,
consensusState, consensusState,
channels[cs.StateChannel],
channels[cs.DataChannel],
channels[cs.VoteChannel],
channels[cs.VoteSetBitsChannel],
channels[consensus.StateChannel],
channels[consensus.DataChannel],
channels[consensus.VoteChannel],
channels[consensus.VoteSetBitsChannel],
peerUpdates, peerUpdates,
waitSync, waitSync,
cs.ReactorMetrics(csMetrics),
consensus.ReactorMetrics(csMetrics),
) )
// Services which will be publishing and/or subscribing for messages (events) // Services which will be publishing and/or subscribing for messages (events)
@ -394,20 +395,20 @@ func createConsensusReactor(
return reactorShim, reactor, consensusState return reactorShim, reactor, consensusState
} }
func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport {
func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport {
return p2p.NewMConnTransport( return p2p.NewMConnTransport(
logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{},
logger, p2p.MConnConfig(cfg.P2P), []*p2p.ChannelDescriptor{},
p2p.MConnTransportOptions{ p2p.MConnTransportOptions{
MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers +
len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")),
MaxAcceptedConnections: uint32(cfg.P2P.MaxNumInboundPeers +
len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")),
), ),
}, },
) )
} }
func createPeerManager( func createPeerManager(
config *cfg.Config,
dbProvider cfg.DBProvider,
cfg *config.Config,
dbProvider config.DBProvider,
p2pLogger log.Logger, p2pLogger log.Logger,
nodeID types.NodeID, nodeID types.NodeID,
) (*p2p.PeerManager, error) { ) (*p2p.PeerManager, error) {
@ -415,16 +416,16 @@ func createPeerManager(
var maxConns uint16 var maxConns uint16
switch { switch {
case config.P2P.MaxConnections > 0:
maxConns = config.P2P.MaxConnections
case cfg.P2P.MaxConnections > 0:
maxConns = cfg.P2P.MaxConnections
case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0:
x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers
case cfg.P2P.MaxNumInboundPeers > 0 && cfg.P2P.MaxNumOutboundPeers > 0:
x := cfg.P2P.MaxNumInboundPeers + cfg.P2P.MaxNumOutboundPeers
if x > math.MaxUint16 { if x > math.MaxUint16 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)", "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)",
config.P2P.MaxNumInboundPeers,
config.P2P.MaxNumOutboundPeers,
cfg.P2P.MaxNumInboundPeers,
cfg.P2P.MaxNumOutboundPeers,
math.MaxUint16, math.MaxUint16,
) )
} }
@ -436,7 +437,7 @@ func createPeerManager(
} }
privatePeerIDs := make(map[types.NodeID]struct{}) privatePeerIDs := make(map[types.NodeID]struct{})
for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") {
privatePeerIDs[types.NodeID(id)] = struct{}{} privatePeerIDs[types.NodeID(id)] = struct{}{}
} }
@ -452,7 +453,7 @@ func createPeerManager(
} }
peers := []p2p.NodeAddress{} peers := []p2p.NodeAddress{}
for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") {
for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") {
address, err := p2p.ParseNodeAddress(p) address, err := p2p.ParseNodeAddress(p)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid peer address %q: %w", p, err) return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
@ -462,7 +463,7 @@ func createPeerManager(
options.PersistentPeers = append(options.PersistentPeers, address.NodeID) options.PersistentPeers = append(options.PersistentPeers, address.NodeID)
} }
for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") {
address, err := p2p.ParseNodeAddress(p) address, err := p2p.ParseNodeAddress(p)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid peer address %q: %w", p, err) return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
@ -470,7 +471,7 @@ func createPeerManager(
peers = append(peers, address) peers = append(peers, address)
} }
peerDB, err := dbProvider(&cfg.DBContext{ID: "peerstore", Config: config})
peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -511,7 +512,7 @@ func createRouter(
} }
func createSwitch( func createSwitch(
config *cfg.Config,
cfg *config.Config,
transport p2p.Transport, transport p2p.Transport,
p2pMetrics *p2p.Metrics, p2pMetrics *p2p.Metrics,
mempoolReactor *p2p.ReactorShim, mempoolReactor *p2p.ReactorShim,
@ -530,13 +531,13 @@ func createSwitch(
peerFilters = []p2p.PeerFilterFunc{} peerFilters = []p2p.PeerFilterFunc{}
) )
if !config.P2P.AllowDuplicateIP {
if !cfg.P2P.AllowDuplicateIP {
connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
} }
// Filter peers by addr or pubkey with an ABCI query. // Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer. // If the query return code is OK, add peer.
if config.FilterPeers {
if cfg.FilterPeers {
connFilters = append( connFilters = append(
connFilters, connFilters,
// ABCI query for address filtering. // ABCI query for address filtering.
@ -575,7 +576,7 @@ func createSwitch(
} }
sw := p2p.NewSwitch( sw := p2p.NewSwitch(
config.P2P,
cfg.P2P,
transport, transport,
p2p.WithMetrics(p2pMetrics), p2p.WithMetrics(p2pMetrics),
p2p.SwitchPeerFilters(peerFilters...), p2p.SwitchPeerFilters(peerFilters...),
@ -583,7 +584,7 @@ func createSwitch(
) )
sw.SetLogger(p2pLogger) sw.SetLogger(p2pLogger)
if config.Mode != cfg.ModeSeed {
if cfg.Mode != config.ModeSeed {
sw.AddReactor("MEMPOOL", mempoolReactor) sw.AddReactor("MEMPOOL", mempoolReactor)
sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("BLOCKCHAIN", bcReactor)
sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("CONSENSUS", consensusReactor)
@ -594,26 +595,26 @@ func createSwitch(
sw.SetNodeInfo(nodeInfo) sw.SetNodeInfo(nodeInfo)
sw.SetNodeKey(nodeKey) sw.SetNodeKey(nodeKey)
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile())
return sw return sw
} }
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch,
p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) {
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile()))
// Add ourselves to addrbook to prevent dialing ourselves // Add ourselves to addrbook to prevent dialing ourselves
if config.P2P.ExternalAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress))
if cfg.P2P.ExternalAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress))
if err != nil { if err != nil {
return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
} }
addrBook.AddOurAddress(addr) addrBook.AddOurAddress(addr)
} }
if config.P2P.ListenAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress))
if cfg.P2P.ListenAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress))
if err != nil { if err != nil {
return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
} }
@ -625,19 +626,19 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
return addrBook, nil return addrBook, nil
} }
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config,
sw *p2p.Switch, logger log.Logger) *pex.Reactor { sw *p2p.Switch, logger log.Logger) *pex.Reactor {
reactorConfig := &pex.ReactorConfig{ reactorConfig := &pex.ReactorConfig{
Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "),
SeedMode: config.Mode == cfg.ModeSeed,
Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "),
SeedMode: cfg.Mode == config.ModeSeed,
// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
// blocks assuming 10s blocks ~ 28 hours. // blocks assuming 10s blocks ~ 28 hours.
// TODO (melekes): make it dynamic based on the actual block latencies // TODO (melekes): make it dynamic based on the actual block latencies
// from the live network. // from the live network.
// https://github.com/tendermint/tendermint/issues/3523 // https://github.com/tendermint/tendermint/issues/3523
SeedDisconnectWaitPeriod: 28 * time.Hour, SeedDisconnectWaitPeriod: 28 * time.Hour,
PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod,
} }
// TODO persistent peers ? so we can have their DNS addrs saved // TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewReactor(addrBook, reactorConfig) pexReactor := pex.NewReactor(addrBook, reactorConfig)
@ -647,7 +648,7 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
} }
func createPEXReactorV2( func createPEXReactorV2(
config *cfg.Config,
cfg *config.Config,
logger log.Logger, logger log.Logger,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
router *p2p.Router, router *p2p.Router,
@ -663,7 +664,7 @@ func createPEXReactorV2(
} }
func makeNodeInfo( func makeNodeInfo(
config *cfg.Config,
cfg *config.Config,
nodeKey types.NodeKey, nodeKey types.NodeKey,
eventSinks []indexer.EventSink, eventSinks []indexer.EventSink,
genDoc *types.GenesisDoc, genDoc *types.GenesisDoc,
@ -676,15 +677,15 @@ func makeNodeInfo(
} }
var bcChannel byte var bcChannel byte
switch config.BlockSync.Version {
case cfg.BlockSyncV0:
switch cfg.BlockSync.Version {
case config.BlockSyncV0:
bcChannel = byte(bcv0.BlockSyncChannel) bcChannel = byte(bcv0.BlockSyncChannel)
case cfg.BlockSyncV2:
case config.BlockSyncV2:
bcChannel = bcv2.BlockchainChannel bcChannel = bcv2.BlockchainChannel
default: default:
return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version)
return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", cfg.BlockSync.Version)
} }
nodeInfo := types.NodeInfo{ nodeInfo := types.NodeInfo{
@ -698,10 +699,10 @@ func makeNodeInfo(
Version: version.TMVersion, Version: version.TMVersion,
Channels: []byte{ Channels: []byte{
bcChannel, bcChannel,
byte(cs.StateChannel),
byte(cs.DataChannel),
byte(cs.VoteChannel),
byte(cs.VoteSetBitsChannel),
byte(consensus.StateChannel),
byte(consensus.DataChannel),
byte(consensus.VoteChannel),
byte(consensus.VoteSetBitsChannel),
byte(mempool.MempoolChannel), byte(mempool.MempoolChannel),
byte(evidence.EvidenceChannel), byte(evidence.EvidenceChannel),
byte(statesync.SnapshotChannel), byte(statesync.SnapshotChannel),
@ -709,21 +710,21 @@ func makeNodeInfo(
byte(statesync.LightBlockChannel), byte(statesync.LightBlockChannel),
byte(statesync.ParamsChannel), byte(statesync.ParamsChannel),
}, },
Moniker: config.Moniker,
Moniker: cfg.Moniker,
Other: types.NodeInfoOther{ Other: types.NodeInfoOther{
TxIndex: txIndexerStatus, TxIndex: txIndexerStatus,
RPCAddress: config.RPC.ListenAddress,
RPCAddress: cfg.RPC.ListenAddress,
}, },
} }
if config.P2P.PexReactor {
if cfg.P2P.PexReactor {
nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
} }
lAddr := config.P2P.ExternalAddress
lAddr := cfg.P2P.ExternalAddress
if lAddr == "" { if lAddr == "" {
lAddr = config.P2P.ListenAddress
lAddr = cfg.P2P.ListenAddress
} }
nodeInfo.ListenAddr = lAddr nodeInfo.ListenAddr = lAddr
@ -733,7 +734,7 @@ func makeNodeInfo(
} }
func makeSeedNodeInfo( func makeSeedNodeInfo(
config *cfg.Config,
cfg *config.Config,
nodeKey types.NodeKey, nodeKey types.NodeKey,
genDoc *types.GenesisDoc, genDoc *types.GenesisDoc,
state sm.State, state sm.State,
@ -748,21 +749,21 @@ func makeSeedNodeInfo(
Network: genDoc.ChainID, Network: genDoc.ChainID,
Version: version.TMVersion, Version: version.TMVersion,
Channels: []byte{}, Channels: []byte{},
Moniker: config.Moniker,
Moniker: cfg.Moniker,
Other: types.NodeInfoOther{ Other: types.NodeInfoOther{
TxIndex: "off", TxIndex: "off",
RPCAddress: config.RPC.ListenAddress,
RPCAddress: cfg.RPC.ListenAddress,
}, },
} }
if config.P2P.PexReactor {
if cfg.P2P.PexReactor {
nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
} }
lAddr := config.P2P.ExternalAddress
lAddr := cfg.P2P.ExternalAddress
if lAddr == "" { if lAddr == "" {
lAddr = config.P2P.ListenAddress
lAddr = cfg.P2P.ListenAddress
} }
nodeInfo.ListenAddr = lAddr nodeInfo.ListenAddr = lAddr
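The hunks above apply one convention throughout the setup code: the config package is imported under its own base name, and the short form cfg moves to the usage site as the parameter name. A minimal sketch of that convention follows; the helper openBlockStore is hypothetical (not part of this change) and assumes it sits next to the initDBs code above in the node package, reusing only calls visible in the hunks.

package node

import (
	"github.com/tendermint/tendermint/config" // imported by its real name; no alias
	"github.com/tendermint/tendermint/store"
)

// openBlockStore is a hypothetical helper showing the post-cleanup naming:
// the package keeps its base name "config", the value is called "cfg".
func openBlockStore(cfg *config.Config, dbProvider config.DBProvider) (*store.BlockStore, error) {
	db, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg})
	if err != nil {
		return nil, err
	}
	return store.NewBlockStore(db), nil
}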


privval/grpc/client.go (+2 -2)

@@ -7,7 +7,7 @@ import (
"google.golang.org/grpc/status"
"github.com/tendermint/tendermint/crypto"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -62,7 +62,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) {
return nil, errStatus.Err()
}
- pk, err := cryptoenc.PubKeyFromProto(resp.PubKey)
+ pk, err := encoding.PubKeyFromProto(resp.PubKey)
if err != nil {
return nil, err
}


privval/grpc/server.go (+2 -2)

@@ -7,7 +7,7 @@ import (
"google.golang.org/grpc/status"
"github.com/tendermint/tendermint/crypto"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
"github.com/tendermint/tendermint/types"
@@ -44,7 +44,7 @@ func (ss *SignerServer) GetPubKey(ctx context.Context, req *privvalproto.PubKeyR
return nil, status.Errorf(codes.NotFound, "error getting pubkey: %v", err)
}
- pk, err := cryptoenc.PubKeyToProto(pubKey)
+ pk, err := encoding.PubKeyToProto(pubKey)
if err != nil {
return nil, status.Errorf(codes.Internal, "error transitioning pubkey to proto: %v", err)
}


privval/grpc/util.go (+6 -6)

@@ -11,7 +11,7 @@ import (
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
grpc "google.golang.org/grpc"
@@ -88,15 +88,15 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption {
// DialRemoteSigner is a generalized function to dial the gRPC server.
func DialRemoteSigner(
- config *cfg.PrivValidatorConfig,
+ cfg *config.PrivValidatorConfig,
chainID string,
logger log.Logger,
usePrometheus bool,
) (*SignerClient, error) {
var transportSecurity grpc.DialOption
- if config.AreSecurityOptionsPresent() {
- transportSecurity = GenerateTLS(config.ClientCertificateFile(),
- config.ClientKeyFile(), config.RootCAFile(), logger)
+ if cfg.AreSecurityOptionsPresent() {
+ transportSecurity = GenerateTLS(cfg.ClientCertificateFile(),
+ cfg.ClientKeyFile(), cfg.RootCAFile(), logger)
} else {
transportSecurity = grpc.WithInsecure()
logger.Info("Using an insecure gRPC connection!")
@@ -111,7 +111,7 @@ func DialRemoteSigner(
dialOptions = append(dialOptions, transportSecurity)
ctx := context.Background()
- _, address := tmnet.ProtocolAndAddress(config.ListenAddr)
+ _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr)
conn, err := grpc.DialContext(ctx, address, dialOptions...)
if err != nil {
logger.Error("unable to connect to server", "target", address, "err", err)
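As a usage sketch (a hypothetical call site, not from the diff), the renamed parameter reads the same way at the caller. The import alias privvalgrpc and the wrapper function are assumptions; only the DialRemoteSigner signature comes from the hunk above.

package sketch

import (
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	privvalgrpc "github.com/tendermint/tendermint/privval/grpc" // alias is an assumption
)

// connectRemoteSigner is a hypothetical wrapper around DialRemoteSigner as it
// appears above: a *config.PrivValidatorConfig named cfg, a chain ID, a
// logger, and a flag for Prometheus metrics (disabled here).
func connectRemoteSigner(cfg *config.PrivValidatorConfig, chainID string, logger log.Logger) (*privvalgrpc.SignerClient, error) {
	return privvalgrpc.DialRemoteSigner(cfg, chainID, logger, false)
}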


privval/msgs_test.go (+2 -2)

@@ -10,7 +10,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/tmhash"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
privproto "github.com/tendermint/tendermint/proto/tendermint/privval"
@@ -60,7 +60,7 @@ func exampleProposal() *types.Proposal {
// nolint:lll // ignore line length for tests
func TestPrivvalVectors(t *testing.T) {
pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey()
- ppk, err := cryptoenc.PubKeyToProto(pk)
+ ppk, err := encoding.PubKeyToProto(pk)
require.NoError(t, err)
// Generate a simple vote


privval/secret_connection.go (+3 -3)

@@ -23,7 +23,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/libs/protoio"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/async"
@@ -408,7 +408,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
// Send our info and receive theirs in tandem.
var trs, _ = async.Parallel(
func(_ int) (val interface{}, abort bool, err error) {
- pbpk, err := cryptoenc.PubKeyToProto(pubKey)
+ pbpk, err := encoding.PubKeyToProto(pubKey)
if err != nil {
return nil, true, err
}
@@ -425,7 +425,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
return nil, true, err // abort
}
- pk, err := cryptoenc.PubKeyFromProto(pba.PubKey)
+ pk, err := encoding.PubKeyFromProto(pba.PubKey)
if err != nil {
return nil, true, err // abort
}


privval/signer_client.go (+2 -2)

@@ -6,7 +6,7 @@ import (
"time"
"github.com/tendermint/tendermint/crypto"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -83,7 +83,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) {
return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description}
}
- pk, err := cryptoenc.PubKeyFromProto(resp.PubKey)
+ pk, err := encoding.PubKeyFromProto(resp.PubKey)
if err != nil {
return nil, err
}


privval/signer_requestHandler.go (+2 -2)

@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/tendermint/tendermint/crypto"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -37,7 +37,7 @@ func DefaultValidationRequestHandler(
if err != nil {
return res, err
}
- pk, err := cryptoenc.PubKeyToProto(pubKey)
+ pk, err := encoding.PubKeyToProto(pubKey)
if err != nil {
return res, err
}
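All of the privval changes above are the same rename: the cryptoenc alias goes away and call sites use the package's real base name. A hypothetical round-trip helper (not in the diff) restating those calls with the alias dropped:

package privval

import (
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding" // was: cryptoenc "..."
)

// pubKeyRoundTrip is a hypothetical helper: it converts a public key to its
// proto form and back using the un-aliased encoding package, mirroring the
// calls changed above.
func pubKeyRoundTrip() error {
	pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey()
	ppk, err := encoding.PubKeyToProto(pk)
	if err != nil {
		return err
	}
	_, err = encoding.PubKeyFromProto(ppk)
	return err
}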


proto/tendermint/blocksync/message.go (+2 -2)

@@ -2,9 +2,9 @@ package blocksync
import (
"errors"
- fmt "fmt"
+ "fmt"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
)
const (


proto/tendermint/blocksync/message_test.go (+1 -1)

@@ -5,7 +5,7 @@ import (
math "math"
"testing"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"


proto/tendermint/consensus/message.go (+2 -2)

@@ -1,9 +1,9 @@
package consensus
import (
- fmt "fmt"
+ "fmt"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
)
// Wrap implements the p2p Wrapper interface and wraps a consensus proto message.


proto/tendermint/mempool/message.go (+2 -2)

@@ -1,9 +1,9 @@
package mempool
import (
- fmt "fmt"
+ "fmt"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
)
// Wrap implements the p2p Wrapper interface and wraps a mempool message.


proto/tendermint/p2p/pex.go (+2 -2)

@@ -1,9 +1,9 @@
package p2p
import (
- fmt "fmt"
+ "fmt"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
)
// Wrap implements the p2p Wrapper interface and wraps a PEX message.


proto/tendermint/statesync/message.go (+2 -2)

@@ -2,9 +2,9 @@ package statesync
import (
"errors"
- fmt "fmt"
+ "fmt"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
)
// Wrap implements the p2p Wrapper interface and wraps a state sync proto message.


proto/tendermint/statesync/message_test.go (+1 -1)

@@ -4,7 +4,7 @@ import (
"encoding/hex"
"testing"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
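The proto message files above all drop self-aliases such as fmt "fmt" and proto "github.com/gogo/protobuf/proto", which only restate the package name. A small standard-library sketch (unrelated to the Tendermint code) of when an alias is actually needed:

package example

import (
	crand "crypto/rand" // alias needed: base name collides with math/rand below
	"fmt"               // no alias: `fmt "fmt"` would restate the name
	"math/rand"
)

// describe mixes the two rand packages to show why only one of them needs an
// alias, and uses fmt under its plain name.
func describe() string {
	var b [1]byte
	if _, err := crand.Read(b[:]); err != nil {
		b[0] = byte(rand.Intn(256)) //nolint:gosec // illustration only
	}
	return fmt.Sprintf("random byte: %#x", b[0])
}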


rpc/client/event_test.go (+2 -2)

@@ -13,7 +13,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/rpc/client"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/types"
)
@@ -127,7 +127,7 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) {
// send
go func() {
var (
- txres *ctypes.ResultBroadcastTx
+ txres *coretypes.ResultBroadcastTx
err error
ctx = context.Background()
)


rpc/client/evidence_test.go (+2 -2)

@@ -11,7 +11,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
- cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+ "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/tmhash"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/privval"
@@ -150,7 +150,7 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
err = abci.ReadMessage(bytes.NewReader(qres.Value), &v)
require.NoError(t, err, "Error reading query result, value %v", qres.Value)
- pk, err := cryptoenc.PubKeyFromProto(v.PubKey)
+ pk, err := encoding.PubKeyFromProto(v.PubKey)
require.NoError(t, err)
require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value))


rpc/client/examples_test.go (+2 -2)

@@ -8,7 +8,7 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
@@ -138,7 +138,7 @@ func ExampleHTTP_batching() {
// Each result in the returned list is the deserialized result of each
// respective ABCIQuery response
for _, result := range results {
- qr, ok := result.(*ctypes.ResultABCIQuery)
+ qr, ok := result.(*coretypes.ResultABCIQuery)
if !ok {
log.Fatal("invalid result type from ABCIQuery request")
}
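A self-contained restatement of the type assertion above under the renamed package (hypothetical helper, not from the diff; the printed Response.Key/Response.Value fields come from the embedded ABCI query response):

package sketch

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/coretypes"
)

// printABCIQueryResults is a hypothetical helper mirroring the batching loop
// above: each batch result is asserted against coretypes.ResultABCIQuery
// rather than the old ctypes alias.
func printABCIQueryResults(results []interface{}) error {
	for _, result := range results {
		qr, ok := result.(*coretypes.ResultABCIQuery)
		if !ok {
			return fmt.Errorf("invalid result type %T from ABCIQuery request", result)
		}
		fmt.Printf("%s = %s\n", qr.Response.Key, qr.Response.Value)
	}
	return nil
}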


rpc/client/helpers_test.go (+5 -5)

@@ -10,7 +10,7 @@ import (
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/client/mock"
- ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
)
func TestWaitForHeight(t *testing.T) {
@@ -33,7 +33,7 @@ func TestWaitForHeight(t *testing.T) {
// now set current block height to 10
m.Call = mock.Call{
- Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}},
+ Response: &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 10}},
}
// we will not wait for more than 10 blocks
@@ -53,7 +53,7 @@ func TestWaitForHeight(t *testing.T) {
// we use the callback to update the status height
myWaiter := func(delta int64) error {
// update the height for the next call
- m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}}
+ m.Call.Response = &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 15}}
return client.DefaultWaitStrategy(delta)
}
@@ -65,13 +65,13 @@ func TestWaitForHeight(t *testing.T) {
pre := r.Calls[3]
require.Nil(pre.Error)
- prer, ok := pre.Response.(*ctypes.ResultStatus)
+ prer, ok := pre.Response.(*coretypes.ResultStatus)
require.True(ok)
assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight)
post := r.Calls[4]
require.Nil(post.Error)
- postr, ok := post.Response.(*ctypes.ResultStatus)
+ postr, ok := post.Response.(*coretypes.ResultStatus)
require.True(ok)
assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight)
}

rpc/client/http/http.go (+54 -54)

@ -7,7 +7,7 @@ import (
"github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/bytes"
rpcclient "github.com/tendermint/tendermint/rpc/client" rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/rpc/coretypes"
jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -198,8 +198,8 @@ func (b *BatchHTTP) Count() int {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// baseRPCClient // baseRPCClient
func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
result := new(ctypes.ResultStatus)
func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
result := new(coretypes.ResultStatus)
_, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -208,8 +208,8 @@ func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error
return result, nil return result, nil
} }
func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
result := new(ctypes.ResultABCIInfo)
func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
result := new(coretypes.ResultABCIInfo)
_, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -222,7 +222,7 @@ func (c *baseRPCClient) ABCIQuery(
ctx context.Context, ctx context.Context,
path string, path string,
data bytes.HexBytes, data bytes.HexBytes,
) (*ctypes.ResultABCIQuery, error) {
) (*coretypes.ResultABCIQuery, error) {
return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions)
} }
@ -230,8 +230,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions(
ctx context.Context, ctx context.Context,
path string, path string,
data bytes.HexBytes, data bytes.HexBytes,
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
result := new(ctypes.ResultABCIQuery)
opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
result := new(coretypes.ResultABCIQuery)
_, err := c.caller.Call(ctx, "abci_query", _, err := c.caller.Call(ctx, "abci_query",
map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove},
result) result)
@ -245,8 +245,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions(
func (c *baseRPCClient) BroadcastTxCommit( func (c *baseRPCClient) BroadcastTxCommit(
ctx context.Context, ctx context.Context,
tx types.Tx, tx types.Tx,
) (*ctypes.ResultBroadcastTxCommit, error) {
result := new(ctypes.ResultBroadcastTxCommit)
) (*coretypes.ResultBroadcastTxCommit, error) {
result := new(coretypes.ResultBroadcastTxCommit)
_, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -257,14 +257,14 @@ func (c *baseRPCClient) BroadcastTxCommit(
func (c *baseRPCClient) BroadcastTxAsync( func (c *baseRPCClient) BroadcastTxAsync(
ctx context.Context, ctx context.Context,
tx types.Tx, tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
) (*coretypes.ResultBroadcastTx, error) {
return c.broadcastTX(ctx, "broadcast_tx_async", tx) return c.broadcastTX(ctx, "broadcast_tx_async", tx)
} }
func (c *baseRPCClient) BroadcastTxSync( func (c *baseRPCClient) BroadcastTxSync(
ctx context.Context, ctx context.Context,
tx types.Tx, tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
) (*coretypes.ResultBroadcastTx, error) {
return c.broadcastTX(ctx, "broadcast_tx_sync", tx) return c.broadcastTX(ctx, "broadcast_tx_sync", tx)
} }
@ -272,8 +272,8 @@ func (c *baseRPCClient) broadcastTX(
ctx context.Context, ctx context.Context,
route string, route string,
tx types.Tx, tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
result := new(ctypes.ResultBroadcastTx)
) (*coretypes.ResultBroadcastTx, error) {
result := new(coretypes.ResultBroadcastTx)
_, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -284,8 +284,8 @@ func (c *baseRPCClient) broadcastTX(
func (c *baseRPCClient) UnconfirmedTxs( func (c *baseRPCClient) UnconfirmedTxs(
ctx context.Context, ctx context.Context,
limit *int, limit *int,
) (*ctypes.ResultUnconfirmedTxs, error) {
result := new(ctypes.ResultUnconfirmedTxs)
) (*coretypes.ResultUnconfirmedTxs, error) {
result := new(coretypes.ResultUnconfirmedTxs)
params := make(map[string]interface{}) params := make(map[string]interface{})
if limit != nil { if limit != nil {
params["limit"] = limit params["limit"] = limit
@ -297,8 +297,8 @@ func (c *baseRPCClient) UnconfirmedTxs(
return result, nil return result, nil
} }
func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
result := new(ctypes.ResultUnconfirmedTxs)
func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
result := new(coretypes.ResultUnconfirmedTxs)
_, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -306,8 +306,8 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn
return result, nil return result, nil
} }
func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
result := new(ctypes.ResultCheckTx)
func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
result := new(coretypes.ResultCheckTx)
_, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -315,8 +315,8 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.Resul
return result, nil return result, nil
} }
func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
result := new(ctypes.ResultNetInfo)
func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
result := new(coretypes.ResultNetInfo)
_, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -324,8 +324,8 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, err
return result, nil return result, nil
} }
func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
result := new(ctypes.ResultDumpConsensusState)
func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
result := new(coretypes.ResultDumpConsensusState)
_, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -333,8 +333,8 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultD
return result, nil return result, nil
} }
func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
result := new(ctypes.ResultConsensusState)
func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
result := new(coretypes.ResultConsensusState)
_, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -345,8 +345,8 @@ func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConse
func (c *baseRPCClient) ConsensusParams( func (c *baseRPCClient) ConsensusParams(
ctx context.Context, ctx context.Context,
height *int64, height *int64,
) (*ctypes.ResultConsensusParams, error) {
result := new(ctypes.ResultConsensusParams)
) (*coretypes.ResultConsensusParams, error) {
result := new(coretypes.ResultConsensusParams)
params := make(map[string]interface{}) params := make(map[string]interface{})
if height != nil { if height != nil {
params["height"] = height params["height"] = height
@ -358,8 +358,8 @@ func (c *baseRPCClient) ConsensusParams(
return result, nil return result, nil
} }
func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
result := new(ctypes.ResultHealth)
func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
result := new(coretypes.ResultHealth)
_, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -371,8 +371,8 @@ func (c *baseRPCClient) BlockchainInfo(
ctx context.Context, ctx context.Context,
minHeight, minHeight,
maxHeight int64, maxHeight int64,
) (*ctypes.ResultBlockchainInfo, error) {
result := new(ctypes.ResultBlockchainInfo)
) (*coretypes.ResultBlockchainInfo, error) {
result := new(coretypes.ResultBlockchainInfo)
_, err := c.caller.Call(ctx, "blockchain", _, err := c.caller.Call(ctx, "blockchain",
map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight},
result) result)
@ -382,8 +382,8 @@ func (c *baseRPCClient) BlockchainInfo(
return result, nil return result, nil
} }
func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
result := new(ctypes.ResultGenesis)
func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
result := new(coretypes.ResultGenesis)
_, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -391,8 +391,8 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, err
return result, nil return result, nil
} }
func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
result := new(ctypes.ResultGenesisChunk)
func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
result := new(coretypes.ResultGenesisChunk)
_, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result)
if err != nil { if err != nil {
return nil, err return nil, err
@ -400,8 +400,8 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.Re
return result, nil return result, nil
} }
func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
result := new(ctypes.ResultBlock)
func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
result := new(coretypes.ResultBlock)
params := make(map[string]interface{}) params := make(map[string]interface{})
if height != nil { if height != nil {
params["height"] = height params["height"] = height
@ -413,8 +413,8 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul
return result, nil return result, nil
} }
func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
result := new(ctypes.ResultBlock)
func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
result := new(coretypes.ResultBlock)
params := map[string]interface{}{ params := map[string]interface{}{
"hash": hash, "hash": hash,
} }
@ -428,8 +428,8 @@ func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*
func (c *baseRPCClient) BlockResults( func (c *baseRPCClient) BlockResults(
ctx context.Context, ctx context.Context,
height *int64, height *int64,
) (*ctypes.ResultBlockResults, error) {
result := new(ctypes.ResultBlockResults)
) (*coretypes.ResultBlockResults, error) {
result := new(coretypes.ResultBlockResults)
params := make(map[string]interface{}) params := make(map[string]interface{})
if height != nil { if height != nil {
params["height"] = height params["height"] = height
@ -441,8 +441,8 @@ func (c *baseRPCClient) BlockResults(
return result, nil return result, nil
} }
func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
result := new(ctypes.ResultCommit)
func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
result := new(coretypes.ResultCommit)
params := make(map[string]interface{}) params := make(map[string]interface{})
if height != nil { if height != nil {
params["height"] = height params["height"] = height
@ -454,8 +454,8 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu
return result, nil return result, nil
} }
func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
result := new(ctypes.ResultTx)
func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
result := new(coretypes.ResultTx)
params := map[string]interface{}{ params := map[string]interface{}{
"hash": hash, "hash": hash,
"prove": prove, "prove": prove,
@ -474,9 +474,9 @@ func (c *baseRPCClient) TxSearch(
page, page,
perPage *int, perPage *int,
orderBy string, orderBy string,
) (*ctypes.ResultTxSearch, error) {
) (*coretypes.ResultTxSearch, error) {
result := new(ctypes.ResultTxSearch)
result := new(coretypes.ResultTxSearch)
params := map[string]interface{}{ params := map[string]interface{}{
"query": query, "query": query,
"prove": prove, "prove": prove,
@ -503,9 +503,9 @@ func (c *baseRPCClient) BlockSearch(
query string, query string,
page, perPage *int, page, perPage *int,
orderBy string, orderBy string,
) (*ctypes.ResultBlockSearch, error) {
) (*coretypes.ResultBlockSearch, error) {
result := new(ctypes.ResultBlockSearch)
result := new(coretypes.ResultBlockSearch)
params := map[string]interface{}{ params := map[string]interface{}{
"query": query, "query": query,
"order_by": orderBy, "order_by": orderBy,
@ -531,8 +531,8 @@ func (c *baseRPCClient) Validators(
height *int64, height *int64,
page, page,
perPage *int, perPage *int,
) (*ctypes.ResultValidators, error) {
result := new(ctypes.ResultValidators)
) (*coretypes.ResultValidators, error) {
result := new(coretypes.ResultValidators)
params := make(map[string]interface{}) params := make(map[string]interface{})
if page != nil { if page != nil {
params["page"] = page params["page"] = page
@ -553,8 +553,8 @@ func (c *baseRPCClient) Validators(
func (c *baseRPCClient) BroadcastEvidence( func (c *baseRPCClient) BroadcastEvidence(
ctx context.Context, ctx context.Context,
ev types.Evidence, ev types.Evidence,
) (*ctypes.ResultBroadcastEvidence, error) {
result := new(ctypes.ResultBroadcastEvidence)
) (*coretypes.ResultBroadcastEvidence, error) {
result := new(coretypes.ResultBroadcastEvidence)
_, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result)
if err != nil { if err != nil {
return nil, err return nil, err
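Every baseRPCClient method above follows the same shape: allocate a coretypes result, call the JSON-RPC route, return the result. A compressed sketch of that shape follows; it is hypothetical, the caller interface is declared locally to mirror the c.caller.Call(...) usage above rather than assuming the exact interface name in rpc/jsonrpc/client, and the generics require Go 1.18+, newer than the code in this diff.

package sketch

import (
	"context"
)

// caller mirrors the Call signature used by the methods above.
type caller interface {
	Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error)
}

// call is a hypothetical generic helper showing the allocate-call-return
// pattern shared by Status, ABCIInfo, NetInfo, and the rest of the methods.
func call[T any](ctx context.Context, c caller, method string, params map[string]interface{}) (*T, error) {
	result := new(T)
	if _, err := c.Call(ctx, method, params, result); err != nil {
		return nil, err
	}
	return result, nil
}

// Example (illustrative): status, err := call[coretypes.ResultStatus](ctx, c, "status", map[string]interface{}{})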


rpc/client/http/ws.go (+7 -7)

@@ -9,9 +9,9 @@ import (
 tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 tmjson "github.com/tendermint/tendermint/libs/json"
-tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
+"github.com/tendermint/tendermint/libs/pubsub"
 rpcclient "github.com/tendermint/tendermint/rpc/client"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
 )
@@ -53,7 +53,7 @@ type wsEvents struct {
 }
 type wsSubscription struct {
-res chan ctypes.ResultEvent
+res chan coretypes.ResultEvent
 id string
 query string
 }
@@ -119,7 +119,7 @@ func (w *wsEvents) Stop() error { return w.ws.Stop() }
 //
 // It returns an error if wsEvents is not running.
 func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string,
-outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
+outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) {
 if !w.IsRunning() {
 return nil, rpcclient.ErrClientNotRunning
@@ -134,7 +134,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string,
 outCap = outCapacity[0]
 }
-outc := make(chan ctypes.ResultEvent, outCap)
+outc := make(chan coretypes.ResultEvent, outCap)
 w.mtx.Lock()
 defer w.mtx.Unlock()
 // subscriber param is ignored because Tendermint will override it with
@@ -213,7 +213,7 @@ func (w *wsEvents) redoSubscriptionsAfter(d time.Duration) {
 }
 func isErrAlreadySubscribed(err error) bool {
-return strings.Contains(err.Error(), tmpubsub.ErrAlreadySubscribed.Error())
+return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error())
 }
 func (w *wsEvents) eventListener() {
@@ -238,7 +238,7 @@ func (w *wsEvents) eventListener() {
 continue
 }
-result := new(ctypes.ResultEvent)
+result := new(coretypes.ResultEvent)
 err := tmjson.Unmarshal(resp.Result, result)
 if err != nil {
 w.Logger.Error("failed to unmarshal response", "err", err)


+29 -29  rpc/client/interface.go

@@ -24,7 +24,7 @@ import (
 "context"
 "github.com/tendermint/tendermint/libs/bytes"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 "github.com/tendermint/tendermint/types"
 )
@@ -61,26 +61,26 @@ type Client interface {
 // is easier to mock.
 type ABCIClient interface {
 // Reading from abci app
-ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error)
-ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error)
+ABCIInfo(context.Context) (*coretypes.ResultABCIInfo, error)
+ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error)
 ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes,
-opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
+opts ABCIQueryOptions) (*coretypes.ResultABCIQuery, error)
 // Writing to abci app
-BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
-BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error)
-BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error)
+BroadcastTxCommit(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error)
+BroadcastTxAsync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)
+BroadcastTxSync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)
 }
 // SignClient groups together the functionality needed to get valid signatures
 // and prove anything about the chain.
 type SignClient interface {
-Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error)
-BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error)
-BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error)
-Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error)
-Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error)
-Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error)
+Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error)
+BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error)
+BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error)
+Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error)
+Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error)
+Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error)
 // TxSearch defines a method to search for a paginated set of transactions by
 // DeliverTx event search criteria.
@@ -90,7 +90,7 @@ type SignClient interface {
 prove bool,
 page, perPage *int,
 orderBy string,
-) (*ctypes.ResultTxSearch, error)
+) (*coretypes.ResultTxSearch, error)
 // BlockSearch defines a method to search for a paginated set of blocks by
 // BeginBlock and EndBlock event search criteria.
@@ -99,29 +99,29 @@ type SignClient interface {
 query string,
 page, perPage *int,
 orderBy string,
-) (*ctypes.ResultBlockSearch, error)
+) (*coretypes.ResultBlockSearch, error)
 }
 // HistoryClient provides access to data from genesis to now in large chunks.
 type HistoryClient interface {
-Genesis(context.Context) (*ctypes.ResultGenesis, error)
-GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error)
-BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+Genesis(context.Context) (*coretypes.ResultGenesis, error)
+GenesisChunked(context.Context, uint) (*coretypes.ResultGenesisChunk, error)
+BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error)
 }
 // StatusClient provides access to general chain info.
 type StatusClient interface {
-Status(context.Context) (*ctypes.ResultStatus, error)
+Status(context.Context) (*coretypes.ResultStatus, error)
 }
 // NetworkClient is general info about the network state. May not be needed
 // usually.
 type NetworkClient interface {
-NetInfo(context.Context) (*ctypes.ResultNetInfo, error)
-DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error)
-ConsensusState(context.Context) (*ctypes.ResultConsensusState, error)
-ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error)
-Health(context.Context) (*ctypes.ResultHealth, error)
+NetInfo(context.Context) (*coretypes.ResultNetInfo, error)
+DumpConsensusState(context.Context) (*coretypes.ResultDumpConsensusState, error)
+ConsensusState(context.Context) (*coretypes.ResultConsensusState, error)
+ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error)
+Health(context.Context) (*coretypes.ResultHealth, error)
 }
 // EventsClient is reactive, you can subscribe to any message, given the proper
@@ -134,7 +134,7 @@ type EventsClient interface {
 //
 // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe
 // or UnsubscribeAll.
-Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error)
+Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) //nolint:lll
 // Unsubscribe unsubscribes given subscriber from query.
 Unsubscribe(ctx context.Context, subscriber, query string) error
 // UnsubscribeAll unsubscribes given subscriber from all the queries.
@@ -143,15 +143,15 @@ type EventsClient interface {
 // MempoolClient shows us data about current mempool state.
 type MempoolClient interface {
-UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error)
-NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error)
-CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error)
+UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error)
+NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error)
+CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)
 }
 // EvidenceClient is used for submitting an evidence of the malicious
 // behavior.
 type EvidenceClient interface {
-BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+BroadcastEvidence(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error)
 }
 // RemoteClient is a Client, which can also return the remote network address.


+49 -49  rpc/client/local/local.go

@@ -9,10 +9,10 @@ import (
 rpccore "github.com/tendermint/tendermint/internal/rpc/core"
 "github.com/tendermint/tendermint/libs/bytes"
 "github.com/tendermint/tendermint/libs/log"
-tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
+"github.com/tendermint/tendermint/libs/pubsub"
+"github.com/tendermint/tendermint/libs/pubsub/query"
 rpcclient "github.com/tendermint/tendermint/rpc/client"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 "github.com/tendermint/tendermint/types"
 )
@@ -72,15 +72,15 @@ func (c *Local) SetLogger(l log.Logger) {
 c.Logger = l
 }
-func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 return c.env.Status(c.ctx)
 }
-func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 return c.env.ABCIInfo(c.ctx)
 }
-func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions)
 }
@@ -88,55 +88,55 @@ func (c *Local) ABCIQueryWithOptions(
 ctx context.Context,
 path string,
 data bytes.HexBytes,
-opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
 }
-func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 return c.env.BroadcastTxCommit(c.ctx, tx)
 }
-func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 return c.env.BroadcastTxAsync(c.ctx, tx)
 }
-func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 return c.env.BroadcastTxSync(c.ctx, tx)
 }
-func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
 return c.env.UnconfirmedTxs(c.ctx, limit)
 }
-func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
 return c.env.NumUnconfirmedTxs(c.ctx)
 }
-func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
+func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
 return c.env.CheckTx(c.ctx, tx)
 }
-func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
+func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 return c.env.NetInfo(c.ctx)
 }
-func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
+func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
 return c.env.DumpConsensusState(c.ctx)
 }
-func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
+func (c *Local) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
 return c.env.GetConsensusState(c.ctx)
 }
-func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
 return c.env.ConsensusParams(c.ctx, height)
 }
-func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
+func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
 return c.env.Health(c.ctx)
 }
-func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
+func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
 return c.env.UnsafeDialSeeds(c.ctx, seeds)
 }
@@ -146,76 +146,76 @@ func (c *Local) DialPeers(
 persistent,
 unconditional,
 private bool,
-) (*ctypes.ResultDialPeers, error) {
+) (*coretypes.ResultDialPeers, error) {
 return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
 }
-func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
 return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
 }
-func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
+func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
 return c.env.Genesis(c.ctx)
 }
-func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
+func (c *Local) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
 return c.env.GenesisChunked(c.ctx, id)
 }
-func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
+func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
 return c.env.Block(c.ctx, height)
 }
-func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
+func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
 return c.env.BlockByHash(c.ctx, hash)
 }
-func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
+func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) {
 return c.env.BlockResults(c.ctx, height)
 }
-func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
+func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
 return c.env.Commit(c.ctx, height)
 }
-func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
+func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll
 return c.env.Validators(c.ctx, height, page, perPage)
 }
-func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
+func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
 return c.env.Tx(c.ctx, hash, prove)
 }
 func (c *Local) TxSearch(
 _ context.Context,
-query string,
+queryString string,
 prove bool,
 page,
 perPage *int,
 orderBy string,
-) (*ctypes.ResultTxSearch, error) {
-return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
+) (*coretypes.ResultTxSearch, error) {
+return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy)
 }
 func (c *Local) BlockSearch(
 _ context.Context,
-query string,
+queryString string,
 page, perPage *int,
 orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
-return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy)
+) (*coretypes.ResultBlockSearch, error) {
+return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy)
 }
-func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
 return c.env.BroadcastEvidence(c.ctx, ev)
 }
 func (c *Local) Subscribe(
 ctx context.Context,
 subscriber,
-query string,
-outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
-q, err := tmquery.New(query)
+queryString string,
+outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) {
+q, err := query.New(queryString)
 if err != nil {
 return nil, fmt.Errorf("failed to parse query: %w", err)
 }
@@ -235,7 +235,7 @@ func (c *Local) Subscribe(
 return nil, fmt.Errorf("failed to subscribe: %w", err)
 }
-outc := make(chan ctypes.ResultEvent, outCap)
+outc := make(chan coretypes.ResultEvent, outCap)
 go c.eventsRoutine(sub, subscriber, q, outc)
 return outc, nil
@@ -244,12 +244,12 @@ func (c *Local) Subscribe(
 func (c *Local) eventsRoutine(
 sub types.Subscription,
 subscriber string,
-q tmpubsub.Query,
-outc chan<- ctypes.ResultEvent) {
+q pubsub.Query,
+outc chan<- coretypes.ResultEvent) {
 for {
 select {
 case msg := <-sub.Out():
-result := ctypes.ResultEvent{
+result := coretypes.ResultEvent{
 SubscriptionID: msg.SubscriptionID(),
 Query: q.String(),
 Data: msg.Data(),
@@ -266,7 +266,7 @@ func (c *Local) eventsRoutine(
 }
 }
 case <-sub.Canceled():
-if sub.Err() == tmpubsub.ErrUnsubscribed {
+if sub.Err() == pubsub.ErrUnsubscribed {
 return
 }
@@ -282,7 +282,7 @@ func (c *Local) eventsRoutine(
 }
 // Try to resubscribe with exponential backoff.
-func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription {
+func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription {
 attempts := 0
 for {
 if !c.IsRunning() {
@@ -299,17 +299,17 @@ func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscript
 }
 }
-func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error {
-args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber}
+func (c *Local) Unsubscribe(ctx context.Context, subscriber, queryString string) error {
+args := pubsub.UnsubscribeArgs{Subscriber: subscriber}
 var err error
-args.Query, err = tmquery.New(query)
+args.Query, err = query.New(queryString)
 if err != nil {
 // if this isn't a valid query it might be an ID, so
 // we'll try that. It'll turn into an error when we
 // try to unsubscribe. Eventually, perhaps, we'll want
 // to change the interface to only allow
 // unsubscription by ID, but that's a larger change.
-args.ID = query
+args.ID = queryString
 }
 return c.EventBus.Unsubscribe(ctx, args)
 }
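In the local.go hunks above, dropping the tmquery alias makes the package reachable as query, so string parameters that used to be named query are renamed queryString to avoid shadowing the package inside those functions. A minimal, self-contained sketch of the same collision-avoidance pattern, using only the standard library (the shout function and its input parameter are hypothetical, not from the Tendermint tree):

package main

import (
	"fmt"
	"strings"
)

// If this parameter were named "strings", it would shadow the strings package
// inside the function body; naming it "input" keeps strings.ToUpper reachable.
func shout(input string) string {
	return strings.ToUpper(input) + "!"
}

func main() {
	fmt.Println(shout("hello"))
}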


+29 -29  rpc/client/mock/abci.go

@@ -7,7 +7,7 @@ import (
 "github.com/tendermint/tendermint/internal/proxy"
 "github.com/tendermint/tendermint/libs/bytes"
 "github.com/tendermint/tendermint/rpc/client"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 "github.com/tendermint/tendermint/types"
 )
@@ -24,11 +24,11 @@ var (
 _ client.ABCIClient = (*ABCIRecorder)(nil)
 )
-func (a ABCIApp) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
-return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
+func (a ABCIApp) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
+return &coretypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
 }
-func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
@@ -36,21 +36,21 @@ func (a ABCIApp) ABCIQueryWithOptions(
 ctx context.Context,
 path string,
 data bytes.HexBytes,
-opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 q := a.App.Query(abci.RequestQuery{
 Data: data,
 Path: path,
 Height: opts.Height,
 Prove: opts.Prove,
 })
-return &ctypes.ResultABCIQuery{Response: q}, nil
+return &coretypes.ResultABCIQuery{Response: q}, nil
 }
 // NOTE: Caller should call a.App.Commit() separately,
 // this function does not actually wait for a commit.
 // TODO: Make it wait for a commit and set res.Height appropriately.
-func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
-res := ctypes.ResultBroadcastTxCommit{}
+func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
+res := coretypes.ResultBroadcastTxCommit{}
 res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 if res.CheckTx.IsErr() {
 return &res, nil
@@ -60,13 +60,13 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re
 return &res, nil
 }
-func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 // and this gets written in a background thread...
 if !c.IsErr() {
 go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
 }
-return &ctypes.ResultBroadcastTx{
+return &coretypes.ResultBroadcastTx{
 Code: c.Code,
 Data: c.Data,
 Log: c.Log,
@@ -75,13 +75,13 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res
 }, nil
 }
-func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 // and this gets written in a background thread...
 if !c.IsErr() {
 go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
 }
-return &ctypes.ResultBroadcastTx{
+return &coretypes.ResultBroadcastTx{
 Code: c.Code,
 Data: c.Data,
 Log: c.Log,
@@ -100,15 +100,15 @@ type ABCIMock struct {
 Broadcast Call
 }
-func (m ABCIMock) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (m ABCIMock) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 res, err := m.Info.GetResponse(nil)
 if err != nil {
 return nil, err
 }
-return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
+return &coretypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
 }
-func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
@@ -116,37 +116,37 @@ func (m ABCIMock) ABCIQueryWithOptions(
 ctx context.Context,
 path string,
 data bytes.HexBytes,
-opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove})
 if err != nil {
 return nil, err
 }
 resQuery := res.(abci.ResponseQuery)
-return &ctypes.ResultABCIQuery{Response: resQuery}, nil
+return &coretypes.ResultABCIQuery{Response: resQuery}, nil
 }
-func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 res, err := m.BroadcastCommit.GetResponse(tx)
 if err != nil {
 return nil, err
 }
-return res.(*ctypes.ResultBroadcastTxCommit), nil
+return res.(*coretypes.ResultBroadcastTxCommit), nil
 }
-func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 res, err := m.Broadcast.GetResponse(tx)
 if err != nil {
 return nil, err
 }
-return res.(*ctypes.ResultBroadcastTx), nil
+return res.(*coretypes.ResultBroadcastTx), nil
 }
-func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 res, err := m.Broadcast.GetResponse(tx)
 if err != nil {
 return nil, err
 }
-return res.(*ctypes.ResultBroadcastTx), nil
+return res.(*coretypes.ResultBroadcastTx), nil
 }
 // ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
@@ -174,7 +174,7 @@ func (r *ABCIRecorder) addCall(call Call) {
 r.Calls = append(r.Calls, call)
 }
-func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 res, err := r.Client.ABCIInfo(ctx)
 r.addCall(Call{
 Name: "abci_info",
@@ -188,7 +188,7 @@ func (r *ABCIRecorder) ABCIQuery(
 ctx context.Context,
 path string,
 data bytes.HexBytes,
-) (*ctypes.ResultABCIQuery, error) {
+) (*coretypes.ResultABCIQuery, error) {
 return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
@@ -196,7 +196,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(
 ctx context.Context,
 path string,
 data bytes.HexBytes,
-opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 res, err := r.Client.ABCIQueryWithOptions(ctx, path, data, opts)
 r.addCall(Call{
 Name: "abci_query",
@@ -207,7 +207,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(
 return res, err
 }
-func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 res, err := r.Client.BroadcastTxCommit(ctx, tx)
 r.addCall(Call{
 Name: "broadcast_tx_commit",
@@ -218,7 +218,7 @@ func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*cty
 return res, err
 }
-func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 res, err := r.Client.BroadcastTxAsync(ctx, tx)
 r.addCall(Call{
 Name: "broadcast_tx_async",
@@ -229,7 +229,7 @@ func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctyp
 return res, err
 }
-func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 res, err := r.Client.BroadcastTxSync(ctx, tx)
 r.addCall(Call{
 Name: "broadcast_tx_sync",


+3 -3  rpc/client/mock/abci_test.go

@@ -14,7 +14,7 @@ import (
 "github.com/tendermint/tendermint/libs/bytes"
 "github.com/tendermint/tendermint/rpc/client"
 "github.com/tendermint/tendermint/rpc/client/mock"
-ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+"github.com/tendermint/tendermint/rpc/coretypes"
 "github.com/tendermint/tendermint/types"
 )
@@ -36,7 +36,7 @@ func TestABCIMock(t *testing.T) {
 // Broadcast commit depends on call
 BroadcastCommit: mock.Call{
 Args: goodTx,
-Response: &ctypes.ResultBroadcastTxCommit{
+Response: &coretypes.ResultBroadcastTxCommit{
 CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")},
 DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")},
 },
@@ -112,7 +112,7 @@ func TestABCIRecorder(t *testing.T) {
 assert.Nil(info.Error)
 assert.Nil(info.Args)
 require.NotNil(info.Response)
-ir, ok := info.Response.(*ctypes.ResultABCIInfo)
+ir, ok := info.Response.(*coretypes.ResultABCIInfo)
 require.True(ok)
 assert.Equal("data", ir.Response.Data)
 assert.Equal("v0.9.9", ir.Response.Version)


Some files were not shown because too many files changed in this diff
