Merge pull request #3326 from tendermint/release/v0.30.1

Release/v0.30.1
pull/3339/head  v0.30.1
Ethan Buchman committed 6 years ago (committed by GitHub)
commit e0f8936455
71 changed files with 894 additions and 406 deletions
  1. .github/CODEOWNERS (+3, -1)
  2. .golangci.yml (+0, -2)
  3. CHANGELOG.md (+29, -1)
  4. abci/client/grpc_client.go (+11, -11)
  5. abci/cmd/abci-cli/abci-cli.go (+0, -1)
  6. blockchain/reactor.go (+2, -4)
  7. blockchain/reactor_test.go (+3, -0)
  8. blockchain/store_test.go (+29, -10)
  9. cmd/tendermint/commands/run_node.go (+0, -2)
  10. cmd/tendermint/commands/show_node_id.go (+1, -2)
  11. cmd/tendermint/commands/show_validator.go (+17, -5)
  12. config/toml.go (+16, -25)
  13. config/toml_test.go (+1, -0)
  14. consensus/byzantine_test.go (+2, -5)
  15. consensus/common_test.go (+39, -11)
  16. consensus/mempool_test.go (+4, -4)
  17. consensus/reactor.go (+1, -1)
  18. consensus/reactor_test.go (+13, -11)
  19. consensus/replay.go (+1, -1)
  20. consensus/replay_file.go (+0, -2)
  21. consensus/replay_test.go (+41, -25)
  22. consensus/state.go (+6, -5)
  23. consensus/state_test.go (+62, -7)
  24. consensus/types/height_vote_set_test.go (+5, -2)
  25. consensus/wal.go (+10, -4)
  26. consensus/wal_generator.go (+8, -20)
  27. consensus/wal_test.go (+22, -2)
  28. crypto/encoding/amino/encode_test.go (+2, -0)
  29. crypto/merkle/proof_key_path_test.go (+3, -1)
  30. crypto/secp256k1/secp256k1_nocgo.go (+2, -2)
  31. docs/spec/abci/apps.md (+4, -0)
  32. libs/common/bit_array.go (+1, -1)
  33. libs/db/remotedb/doc.go (+1, -1)
  34. libs/db/remotedb/grpcdb/client.go (+0, -8)
  35. libs/db/remotedb/remotedb_test.go (+1, -1)
  36. libs/db/remotedb/test.crt (+17, -23)
  37. libs/db/remotedb/test.key (+25, -25)
  38. lite/client/provider_test.go (+2, -2)
  39. lite/proxy/query_test.go (+1, -2)
  40. mempool/bench_test.go (+2, -1)
  41. mempool/mempool_test.go (+22, -10)
  42. mempool/reactor_test.go (+2, -1)
  43. node/node.go (+5, -0)
  44. node/node_test.go (+7, -0)
  45. p2p/conn/secret_connection_test.go (+4, -5)
  46. p2p/transport.go (+24, -6)
  47. p2p/transport_test.go (+44, -15)
  48. privval/file.go (+0, -1)
  49. privval/server.go (+1, -1)
  50. rpc/client/main_test.go (+2, -2)
  51. rpc/client/mock/abci.go (+6, -6)
  52. rpc/client/rpc_test.go (+1, -1)
  53. rpc/core/abci.go (+2, -2)
  54. rpc/core/blocks.go (+4, -2)
  55. rpc/core/consensus.go (+10, -4)
  56. rpc/core/events.go (+1, -1)
  57. rpc/core/mempool.go (+1, -1)
  58. rpc/core/net.go (+129, -17)
  59. rpc/core/types/responses.go (+1, -2)
  60. rpc/grpc/grpc_test.go (+2, -2)
  61. rpc/test/helpers.go (+8, -0)
  62. state/execution.go (+1, -1)
  63. state/execution_test.go (+27, -0)
  64. state/state_test.go (+6, -5)
  65. tools/tm-bench/transacter.go (+8, -5)
  66. tools/tm-bench/transacter_test.go (+3, -3)
  67. tools/tm-monitor/monitor/node.go (+2, -2)
  68. types/block_test.go (+4, -2)
  69. types/validator_set_test.go (+159, -58)
  70. types/vote_test.go (+20, -19)
  71. version/version.go (+1, -1)

.github/CODEOWNERS (+3, -1)

@ -4,4 +4,6 @@
* @ebuchman @melekes @xla
# Precious documentation
/docs/ @zramsay
/docs/README.md @zramsay
/docs/DOCS_README.md @zramsay
/docs/.vuepress/ @zramsay

.golangci.yml (+0, -2)

@ -18,9 +18,7 @@ linters:
- nakedret
- lll
- gochecknoglobals
- govet
- gocritic
- gosec
- gochecknoinits
- scopelint
- stylecheck


CHANGELOG.md (+29, -1)

@ -1,5 +1,33 @@
# Changelog
## v0.30.1
*February 20th, 2019*
This release fixes a consensus halt and a DataCorruptionError after restart
discovered in `game_of_stakes_6`. It also fixes a security issue in the p2p
handshake by authenticating the NetAddress.ID of the peer we're dialing.
### IMPROVEMENTS:
* [config] [\#3291](https://github.com/tendermint/tendermint/issues/3291) Make
config.ResetTestRootWithChainID() create concurrency-safe test directories.
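For reference, this change boils down to building each test root with `ioutil.TempDir` (visible in the `config/toml.go` diff further down). A minimal runnable sketch of that approach:

```go
package main

import (
	"fmt"
	"io/ioutil"
)

// resetTestRoot sketches the concurrency-safe directory creation from the
// config/toml.go diff below: ioutil.TempDir appends a random suffix to the
// prefix, so concurrent tests never collide on a shared root directory.
func resetTestRoot(chainID, testName string) (string, error) {
	return ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
}

func main() {
	dir, err := resetTestRoot("test-chain", "wal_test")
	if err != nil {
		panic(err)
	}
	fmt.Println(dir) // e.g. /tmp/test-chain-wal_test_814988032
}
```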
### BUG FIXES:
* [consensus] [\#3295](https://github.com/tendermint/tendermint/issues/3295)
Flush WAL on stop to prevent data corruption during graceful shutdown.
* [consensus] [\#3302](https://github.com/tendermint/tendermint/issues/3302)
Fix possible halt by resetting TriggeredTimeoutPrecommit before starting next height.
* [rpc] [\#3251](https://github.com/tendermint/tendermint/issues/3251) Fix
`/net_info#peers#remote_ip` format (illustrated in the first sketch after this list). New format spec:
  * dotted decimal ("192.0.2.1"), if ip is an IPv4 or IPv4-mapped IPv6 address
  * IPv6 ("2001:db8::1"), if ip is a valid IPv6 address
* [cmd] [\#3314](https://github.com/tendermint/tendermint/issues/3314) Return
an error on `show_validator` when the private validator file does not exist.
* [p2p] [\#3010](https://github.com/tendermint/tendermint/issues/3010#issuecomment-464287627)
Authenticate a peer against its NetAddress.ID when dialing (see the second sketch below).
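The `rpc/core/net.go` diff is not expanded on this page, but the `remote_ip` format spec above can be illustrated with the standard library alone, since `net.IP.String()` produces exactly this output:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Dotted decimal for IPv4 and IPv4-mapped IPv6 addresses,
	// canonical IPv6 text otherwise.
	for _, raw := range []string{"192.0.2.1", "::ffff:192.0.2.1", "2001:db8::1"} {
		fmt.Println(net.ParseIP(raw).String())
	}
	// Output:
	// 192.0.2.1
	// 192.0.2.1
	// 2001:db8::1
}
```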
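The `p2p/transport.go` diff is likewise collapsed here, so the following is only a hedged, self-contained sketch of the dial-time check: derive an ID from the remote public key exchanged during the handshake and require it to match the `NetAddress.ID` that was dialed. The `pubKeyToID` helper is a stand-in for tendermint's `p2p.PubKeyToID`:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// ID is a stand-in for p2p.ID: a hex-encoded address derived from a pubkey.
type ID string

// pubKeyToID sketches p2p.PubKeyToID for an ed25519 key: the ID is the
// hex encoding of the first 20 bytes of SHA256(pubkey).
func pubKeyToID(pubKeyBytes []byte) ID {
	h := sha256.Sum256(pubKeyBytes)
	return ID(hex.EncodeToString(h[:20]))
}

// authenticatePeer fails unless the ID derived from the remote public key
// matches the ID we intended to dial.
func authenticatePeer(remotePubKey []byte, dialedID ID) error {
	if connID := pubKeyToID(remotePubKey); connID != dialedID {
		return fmt.Errorf("dialed peer with ID %v, but got %v", dialedID, connID)
	}
	return nil
}

func main() {
	pk := []byte("example-ed25519-pubkey-bytes")
	fmt.Println(authenticatePeer(pk, pubKeyToID(pk))) // <nil>
	fmt.Println(authenticatePeer(pk, ID("deadbeef"))) // mismatch -> error
}
```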
## v0.30.0
*February 8th, 2019*
@ -89,7 +117,7 @@ Special thanks to external contributors on this release:
- [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop
concurrently
- [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points
- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket
- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket that was causing messages to be corrupted
## v0.29.1


abci/client/grpc_client.go (+11, -11)

@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}
func (cli *grpcClient) FlushAsync() *ReqRes {
@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
}
func (cli *grpcClient) CommitAsync() *ReqRes {
@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {


abci/cmd/abci-cli/abci-cli.go (+0, -1)

@ -394,7 +394,6 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
return err
}
}
return nil
}
func muxOnCommands(cmd *cobra.Command, pArgs []string) error {


blockchain/reactor.go (+2, -4)

@ -8,7 +8,6 @@ import (
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
@ -302,7 +301,7 @@ FOR_LOOP:
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
@ -338,8 +337,7 @@ FOR_LOOP:
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
blocksSynced++


blockchain/reactor_test.go (+3, -0)

@ -1,6 +1,7 @@
package blockchain
import (
"os"
"sort"
"testing"
"time"
@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
func TestNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(65)
@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(148)


blockchain/store_test.go (+29, -10)

@ -3,6 +3,7 @@ package blockchain
import (
"bytes"
"fmt"
"os"
"runtime/debug"
"strings"
"testing"
@ -21,13 +22,17 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()
// make a Commit with a single vote containing just the height and a timestamp
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
return types.NewCommit(types.BlockID{}, commitSigs)
}
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
@ -37,7 +42,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, NewBlockStore(blockDB)
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
}
func TestLoadBlockStoreStateJSON(t *testing.T) {
@ -87,19 +92,32 @@ func freshBlockStore() (*BlockStore, db.DB) {
}
var (
state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state sm.State
block *types.Block
partSet *types.PartSet
part1 *types.Part
part2 *types.Part
seenCommit1 *types.Commit
)
block = makeBlock(1, state, new(types.Commit))
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
func TestMain(m *testing.M) {
var cleanup cleanupFunc
state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
block = makeBlock(1, state, new(types.Commit))
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = makeTestCommit(10, tmtime.Now())
)
code := m.Run()
cleanup()
os.Exit(code)
}
// TODO: This test should be simplified ...
func TestBlockStoreSaveLoadBlock(t *testing.T) {
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
// check there are no blocks at various heights
@ -350,7 +368,8 @@ func TestLoadBlockMeta(t *testing.T) {
}
func TestBlockFetchAtHeight(t *testing.T) {
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
block := makeBlock(bs.Height()+1, state, new(types.Commit))


cmd/tendermint/commands/run_node.go (+0, -2)

@ -77,8 +77,6 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
// Run forever
select {}
return nil
},
}


cmd/tendermint/commands/show_node_id.go (+1, -2)

@ -16,12 +16,11 @@ var ShowNodeIDCmd = &cobra.Command{
}
func showNodeID(cmd *cobra.Command, args []string) error {
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
fmt.Println(nodeKey.ID())
return nil
}

cmd/tendermint/commands/show_validator.go (+17, -5)

@ -3,8 +3,10 @@ package commands
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/privval"
)
@ -12,11 +14,21 @@ import (
var ShowValidatorCmd = &cobra.Command{
Use: "show_validator",
Short: "Show this node's validator info",
Run: showValidator,
RunE: showValidator,
}
func showValidator(cmd *cobra.Command, args []string) {
privValidator := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
fmt.Println(string(pubKeyJSONBytes))
func showValidator(cmd *cobra.Command, args []string) error {
keyFilePath := config.PrivValidatorKeyFile()
if !cmn.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}
pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
bz, err := cdc.MarshalJSON(pv.GetPubKey())
if err != nil {
return errors.Wrap(err, "failed to marshal private validator pubkey")
}
fmt.Println(string(bz))
return nil
}

config/toml.go (+16, -25)

@ -3,13 +3,16 @@ package config
import (
"bytes"
"fmt"
"os"
"io/ioutil"
"path/filepath"
"text/template"
cmn "github.com/tendermint/tendermint/libs/common"
)
// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0700
var configTemplate *template.Template
func init() {
@ -24,13 +27,13 @@ func init() {
// EnsureRoot creates the root, config, and data directories if they don't exist,
// and panics if it fails.
func EnsureRoot(rootDir string) {
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
@ -322,29 +325,17 @@ func ResetTestRoot(testName string) *Config {
}
func ResetTestRootWithChainID(testName string, chainID string) *Config {
rootDir := os.ExpandEnv("$HOME/.tendermint_test")
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
if cmn.FileExists(rootDir + "_bak") {
if err := os.RemoveAll(rootDir + "_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if cmn.FileExists(rootDir) {
if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Create new dir
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
// ensure config and data subdirs are created
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
panic(err)
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
panic(err)
}
baseConfig := DefaultBaseConfig()


config/toml_test.go (+1, -0)

@ -48,6 +48,7 @@ func TestEnsureTestRoot(t *testing.T) {
// create root dir
cfg := ResetTestRoot(testName)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir
// make sure config is set properly


consensus/byzantine_test.go (+2, -5)

@ -13,10 +13,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_byzantine_test")
}
//----------------------------------------------
// byzantine failures
@ -29,7 +25,8 @@ func init() {
func TestByzantine(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
defer cleanup()
// give the byzantine validator a normal ticker
ticker := NewTimeoutTicker()


consensus/common_test.go (+39, -11)

@ -37,8 +37,13 @@ const (
testSubscriber = "test-client"
)
// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var consensusReplayConfig *cfg.Config
var ensureTimeout = time.Millisecond * 100
func ensureDir(dir string, mode os.FileMode) {
@ -124,15 +129,21 @@ func startTestRound(cs *ConsensusState, height int64, round int) {
// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
cs1.mtx.Unlock()
if block == nil { // on error
panic("error creating proposal block")
}
// Make proposal
polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()}
cs1.mtx.RLock()
validRound := cs1.ValidRound
chainID := cs1.state.ChainID
cs1.mtx.RUnlock()
polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil {
if err := vs.SignProposal(chainID, proposal); err != nil {
panic(err)
}
return
@ -242,6 +253,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
// consensus states
func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
config := cfg.ResetTestRoot("consensus_state_test")
return newConsensusStateWithConfig(config, state, pv, app)
}
@ -400,7 +412,7 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
}
func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}
@ -554,14 +566,17 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {
opt(thisConfig)
}
@ -574,18 +589,26 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css
return css, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
}
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal types.PrivValidator
if i < nValidators {
@ -611,7 +634,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css
return css, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
}
}
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
@ -621,7 +648,6 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
}
}
panic("didnt find peer in switches")
return -1
}
//-------------------------------------------------------------------------------
@ -699,8 +725,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
return m.c
}
func (mockTicker) SetLogger(log.Logger) {
}
func (*mockTicker) SetLogger(log.Logger) {}
//------------------------------------
@ -709,6 +734,9 @@ func newCounter() abci.Application {
}
func newPersistentKVStore() abci.Application {
dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
dir, err := ioutil.TempDir("", "persistent-kvstore")
if err != nil {
panic(err)
}
return kvstore.NewPersistentKVStoreApplication(dir)
}

consensus/mempool_test.go (+4, -4)

@ -3,6 +3,7 @@ package consensus
import (
"encoding/binary"
"fmt"
"os"
"testing"
"time"
@ -14,10 +15,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_mempool_test")
}
// for testing
func assertMempool(txn txNotifier) sm.Mempool {
return txn.(sm.Mempool)
@ -25,6 +22,7 @@ func assertMempool(txn txNotifier) sm.Mempool {
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@ -43,6 +41,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@ -58,6 +57,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
func TestMempoolProgressInHigherRound(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())


consensus/reactor.go (+1, -1)

@ -896,7 +896,7 @@ type PeerState struct {
peer p2p.Peer
logger log.Logger
mtx sync.Mutex `json:"-"` // NOTE: Modify below using setters, never directly.
mtx sync.Mutex // NOTE: Modify below using setters, never directly.
PRS cstypes.PeerRoundState `json:"round_state"` // Exposed.
Stats *peerStateStats `json:"stats"` // Exposed.
}


consensus/reactor_test.go (+13, -11)

@ -27,10 +27,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_reactor_test")
}
//----------------------------------------------
// in-process testnets
@ -86,7 +82,8 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
@ -116,6 +113,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
@ -218,10 +216,11 @@ func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@ -239,7 +238,8 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@ -263,7 +263,8 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
defer stopConsensusNet(logger, reactors, eventBuses)
@ -324,8 +325,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
func TestReactorValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
logger := log.TestingLogger()
reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
@ -422,7 +423,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].config.SkipTimeoutCommit = false


consensus/replay.go (+1, -1)

@ -334,7 +334,7 @@ func (h *Handshaker) ReplayBlocks(
} else if storeBlockHeight < appBlockHeight {
// the app should never be ahead of the store (but this is under app's control)
return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)


consensus/replay_file.go (+0, -2)

@ -103,7 +103,6 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
}
pb.count++
}
return nil
}
//------------------------------------------------
@ -295,7 +294,6 @@ func (pb *playback) replayConsoleLoop() int {
fmt.Println(pb.count)
}
}
return 0
}
//--------------------------------------------------------------------------------


consensus/replay_test.go (+41, -25)

@ -17,23 +17,31 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
auto "github.com/tendermint/tendermint/libs/autofile"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
var consensusReplayConfig *cfg.Config
func init() {
func TestMain(m *testing.M) {
config = ResetConfig("consensus_reactor_test")
consensusReplayConfig = ResetConfig("consensus_replay_test")
configStateTest := ResetConfig("consensus_state_test")
configMempoolTest := ResetConfig("consensus_mempool_test")
configByzantineTest := ResetConfig("consensus_byzantine_test")
code := m.Run()
os.RemoveAll(config.RootDir)
os.RemoveAll(consensusReplayConfig.RootDir)
os.RemoveAll(configStateTest.RootDir)
os.RemoveAll(configMempoolTest.RootDir)
os.RemoveAll(configByzantineTest.RootDir)
os.Exit(code)
}
// These tests ensure we can always recover from failure at any part of the consensus process.
@ -51,7 +59,8 @@ func init() {
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
logger := log.TestingLogger()
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
@ -59,7 +68,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start()
@ -110,21 +118,22 @@ func TestWALCrash(t *testing.T) {
3},
}
for _, tc := range testCases {
for i, tc := range testCases {
consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
t.Run(tc.name, func(t *testing.T) {
crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
})
}
}
func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
walPaniced := make(chan error)
crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}
i := 1
LOOP:
for {
// fmt.Printf("====== LOOP %d\n", i)
t.Logf("====== LOOP %d\n", i)
// create consensus state from a clean slate
@ -163,7 +172,7 @@ LOOP:
t.Logf("WAL paniced: %v", err)
// make sure we can make blocks after a crash
startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
// stop consensus state and transactions sender (initFn)
cs.Stop()
@ -269,29 +278,37 @@ var modes = []uint{0, 1, 2}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 0, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 0, m)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 1, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 1, m)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS-1, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS, m)
}
}
@ -311,10 +328,8 @@ func tempWALWithData(data []byte) string {
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := ResetConfig("proxy_test_")
walBody, err := WALWithNBlocks(NUM_BLOCKS)
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
@ -631,6 +646,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
clientCreator := proxy.NewLocalClientCreator(app)
config := ResetConfig("proxy_test_")
defer os.RemoveAll(config.RootDir)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)


consensus/state.go (+6, -5)

@ -566,6 +566,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
cs.CommitRound = -1
cs.LastCommit = lastPrecommits
cs.LastValidators = state.LastValidators
cs.TriggeredTimeoutPrecommit = false
cs.state = state
@ -909,7 +910,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
}
// Make proposal
propBlockId := types.BlockID{block.Hash(), blockParts.Header()}
propBlockId := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil {
@ -1320,7 +1321,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
// Execute and commit the block, update and save the state, and update the mempool.
// NOTE The block.AppHash wont reflect these txs until the next block.
var err error
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block)
if err != nil {
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
err := cmn.Kill()
@ -1543,7 +1544,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
cs.evsw.FireEvent(types.EventVote, vote)
// if we can skip timeoutCommit and have all the votes now,
@ -1571,7 +1572,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return
}
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
cs.evsw.FireEvent(types.EventVote, vote)
switch vote.Type {
@ -1683,7 +1684,7 @@ func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, heade
Round: cs.Round,
Timestamp: cs.voteTime(),
Type: type_,
BlockID: types.BlockID{hash, header},
BlockID: types.BlockID{Hash: hash, PartsHeader: header},
}
err := cs.privValidator.SignVote(cs.state.ChainID, vote)
return vote, err


consensus/state_test.go (+62, -7)

@ -18,10 +18,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_state_test")
}
/*
ProposeSuite
@ -1288,8 +1284,8 @@ func (n *fakeTxNotifier) Notify() {
}
func TestStartNextHeightCorrectly(t *testing.T) {
config.Consensus.SkipTimeoutCommit = false
cs1, vss := randConsensusState(4)
cs1.config.SkipTimeoutCommit = false
cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -1326,13 +1322,14 @@ func TestStartNextHeightCorrectly(t *testing.T) {
// add precommits
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
time.Sleep(5 * time.Millisecond)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
rs = cs1.GetRoundState()
assert.True(t, rs.TriggeredTimeoutPrecommit)
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
cs1.txNotifier.(*fakeTxNotifier).Notify()
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
@ -1340,6 +1337,64 @@ func TestStartNextHeightCorrectly(t *testing.T) {
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
}
func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
config.Consensus.SkipTimeoutCommit = false
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
partSize := types.BlockPartSizeBytes
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
addr := cs1.privValidator.GetPubKey().Address()
voteCh := subscribeToVoter(cs1, addr)
// start round and wait for propose and prevote
startTestRound(cs1, height, round)
ensureNewRound(newRoundCh, height, round)
ensureNewProposal(proposalCh, height, round)
rs := cs1.GetRoundState()
theBlockHash := rs.ProposalBlock.Hash()
theBlockParts := rs.ProposalBlockParts.Header()
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], theBlockHash)
signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
ensurePrecommit(voteCh, height, round)
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
rs = cs1.GetRoundState()
// add precommits
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
time.Sleep(5 * time.Millisecond)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
rs = cs1.GetRoundState()
assert.True(t, rs.TriggeredTimeoutPrecommit)
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
propBlockParts := propBlock.MakePartSet(partSize)
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height+1, 0)
rs = cs1.GetRoundState()
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each height")
}
//------------------------------------------------------------------------------------------
// SlashingSuite
// TODO: Slashing


consensus/types/height_vote_set_test.go (+5, -2)

@ -2,6 +2,7 @@ package types
import (
"fmt"
"os"
"testing"
cfg "github.com/tendermint/tendermint/config"
@ -11,8 +12,11 @@ import (
var config *cfg.Config // NOTE: must be reset for each _test.go file
func init() {
func TestMain(m *testing.M) {
config = cfg.ResetTestRoot("consensus_height_vote_set_test")
code := m.Run()
os.RemoveAll(config.RootDir)
os.Exit(code)
}
func TestPeerCatchupRounds(t *testing.T) {
@ -64,7 +68,6 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
err := privVal.SignVote(chainID, vote)
if err != nil {
panic(fmt.Sprintf("Error signing vote: %v", err))
return nil
}
return vote
}

consensus/wal.go (+10, -4)

@ -116,6 +116,7 @@ func (wal *baseWAL) OnStart() error {
// Use Wait() to ensure it's finished shutting down
// before cleaning up files.
func (wal *baseWAL) OnStop() {
wal.group.Flush()
wal.group.Stop()
wal.group.Close()
}
@ -228,12 +229,17 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
return &WALEncoder{wr}
}
// Encode writes the custom encoding of v to the stream.
// Encode writes the custom encoding of v to the stream. It returns an error if
// the amino-encoded size of v is greater than 1MB. Any error encountered
// during the write is also returned.
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
data := cdc.MustMarshalBinaryBare(v)
crc := crc32.Checksum(data, crc32c)
length := uint32(len(data))
if length > maxMsgSizeBytes {
return fmt.Errorf("Msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes)
}
totalLength := 8 + int(length)
msg := make([]byte, totalLength)
@ -306,15 +312,15 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
}
data := make([]byte, length)
_, err = dec.rd.Read(data)
n, err := dec.rd.Read(data)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)}
}
// check checksum before decoding data
actualCRC := crc32.Checksum(data, crc32c)
if actualCRC != crc {
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)}
}
var res = new(TimedWALMessage) // nolint: gosimple


consensus/wal_generator.go (+8, -20)

@ -7,7 +7,7 @@ import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
@ -28,8 +28,9 @@ import (
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
config := getConfig()
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
defer os.RemoveAll(config.RootDir)
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
@ -102,11 +103,11 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
}
//WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
return []byte{}, err
}
@ -114,18 +115,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
return b.Bytes(), nil
}
// f**ing long, but unique for each test
func makePathname() string {
// get path
p, err := os.Getwd()
if err != nil {
panic(err)
}
// fmt.Println(p)
sep := string(filepath.Separator)
return strings.Replace(p, sep, "_", -1)
}
func randPort() int {
// returns between base and base + spread
base, spread := 20000, 20000
@ -140,9 +129,8 @@ func makeAddrs() (string, string, string) {
}
// getConfig returns a config for test cases
func getConfig() *cfg.Config {
pathname := makePathname()
c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
func getConfig(t *testing.T) *cfg.Config {
c := cfg.ResetTestRoot(t.Name())
// and we use random ports to run in parallel
tm, rpc, grpc := makeAddrs()


consensus/wal_test.go (+22, -2)

@ -48,7 +48,7 @@ func TestWALTruncate(t *testing.T) {
//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
//at this time, RotateFile is called, truncate content exist in each file.
err = WALGenerateNBlocks(wal.Group(), 60)
err = WALGenerateNBlocks(t, wal.Group(), 60)
require.NoError(t, err)
time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run
@ -95,8 +95,28 @@ func TestWALEncoderDecoder(t *testing.T) {
}
}
func TestWALWritePanicsIfMsgIsTooBig(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")
wal, err := NewWAL(walFile)
require.NoError(t, err)
err = wal.Start()
require.NoError(t, err)
defer func() {
wal.Stop()
// wait for the wal to finish shutting down so we
// can safely remove the directory
wal.Wait()
}()
assert.Panics(t, func() { wal.Write(make([]byte, maxMsgSizeBytes+1)) })
}
func TestWALSearchForEndHeight(t *testing.T) {
walBody, err := WALWithNBlocks(6)
walBody, err := WALWithNBlocks(t, 6)
if err != nil {
t.Fatal(err)
}


crypto/encoding/amino/encode_test.go (+2, -0)

@ -47,6 +47,8 @@ func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool)
require.Nil(t, err, "%+v", err)
}
// ExamplePrintRegisteredTypes refers to unknown identifier: PrintRegisteredTypes
//nolint:govet
func ExamplePrintRegisteredTypes() {
cdc.PrintTypes(os.Stdout)
// Output: | Type | Name | Prefix | Length | Notes |


crypto/merkle/proof_key_path_test.go (+3, -1)

@ -1,6 +1,8 @@
package merkle
import (
// it is ok to use math/rand here: we do not need a cryptographically secure random
// number generator here and we can run the tests a bit faster
"math/rand"
"testing"
@ -24,7 +26,7 @@ func TestKeyPath(t *testing.T) {
keys[i][j] = alphanum[rand.Intn(len(alphanum))]
}
case KeyEncodingHex:
rand.Read(keys[i])
rand.Read(keys[i]) //nolint: gosec
default:
panic("Unexpected encoding")
}


crypto/secp256k1/secp256k1_nocgo.go (+2, -2)

@ -52,8 +52,8 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool {
// that len(sigStr) == 64.
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
return &secp256k1.Signature{
new(big.Int).SetBytes(sigStr[:32]),
new(big.Int).SetBytes(sigStr[32:64]),
R: new(big.Int).SetBytes(sigStr[:32]),
S: new(big.Int).SetBytes(sigStr[32:64]),
}
}


docs/spec/abci/apps.md (+4, -0)

@ -171,6 +171,10 @@
Note that the maximum total power of the validator set is bounded by
`MaxTotalVotingPower = MaxInt64 / 8`. Applications are responsible for ensuring
they do not make changes to the validator set that cause it to exceed this
limit.
Additionally, applications must ensure that a single set of updates does not contain any duplicates -
a given public key can only appear in an update once. If an update includes
duplicates, the block execution will fail irrecoverably.
### InitChain
ResponseInitChain can return a list of validators.
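
A minimal sketch of the duplicate-update rule this `apps.md` hunk documents, using a hypothetical stand-in for the real `abci.ValidatorUpdate` type:

```go
package main

import "fmt"

// ValidatorUpdate mirrors the shape of abci.ValidatorUpdate for illustration.
type ValidatorUpdate struct {
	PubKey string // stand-in for the real amino-encoded public key
	Power  int64
}

// validateUpdates rejects duplicate public keys, the condition the spec text
// above says applications must avoid in a single set of updates.
func validateUpdates(updates []ValidatorUpdate) error {
	seen := make(map[string]bool, len(updates))
	for _, u := range updates {
		if seen[u.PubKey] {
			return fmt.Errorf("duplicate validator update for pubkey %s", u.PubKey)
		}
		seen[u.PubKey] = true
	}
	return nil
}

func main() {
	bad := []ValidatorUpdate{{PubKey: "abc", Power: 10}, {PubKey: "abc", Power: 0}}
	fmt.Println(validateUpdates(bad)) // duplicate validator update for pubkey abc
}
```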


libs/common/bit_array.go (+1, -1)

@ -412,6 +412,6 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error {
bA2.SetIndex(i, true)
}
}
*bA = *bA2
*bA = *bA2 //nolint:govet
return nil
}

libs/db/remotedb/doc.go (+1, -1)

@ -11,7 +11,7 @@ remotedb's RemoteDB implements db.DB so can be used normally
like other databases. One just has to explicitly connect to the
remote database with a client setup such as:
client, err := remotedb.NewInsecure(addr)
client, err := remotedb.NewRemoteDB(addr, cert)
// Make sure to invoke InitRemote!
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
log.Fatalf("Failed to initialize the remote db")


libs/db/remotedb/grpcdb/client.go (+0, -8)

@ -7,14 +7,6 @@ import (
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)
// Security defines how the client will talk to the gRPC server.
type Security uint
const (
Insecure Security = iota
Secure
)
// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr.
// Use kind to set the level of security to either Secure or Insecure.
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {


libs/db/remotedb/remotedb_test.go (+1, -1)

@ -14,7 +14,7 @@ import (
func TestRemoteDB(t *testing.T) {
cert := "test.crt"
key := "test.key"
ln, err := net.Listen("tcp", "0.0.0.0:0")
ln, err := net.Listen("tcp", "localhost:0")
require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
srv, err := grpcdb.NewServer(cert, key)
require.Nil(t, err)


libs/db/remotedb/test.crt (+17, -23)

@ -1,25 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw
GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw
MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35
dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4
D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf
pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1
0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ
Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E
BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc
XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw
KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA
A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O
uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v
lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO
+S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/
OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf
PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ
381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL
xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK
f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8
+eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ
T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ==
MIIDAjCCAeqgAwIBAgIJAOGCVedOwRbOMA0GCSqGSIb3DQEBBQUAMCExCzAJBgNV
BAYTAlVTMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwMjExMTU0NjQ5WhcNMjAw
MjExMTU0NjQ5WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbG9jYWxob3N0MIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA60S/fNUWoHm1PYI/yrlnZNtr
dRqDORHe0hPwl/lttLz7+a7HzQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWc
GjxJL24tVwiOwqYRzTPZ/rK3JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4D
h/XgWjEt4DhpHwf/zuIK9XkJw0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0
AdsQCjt1GKcIROkcOGUHqByINJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhq
HRTCt5UELWs/53Gj1ffNuhjECOVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQID
AQABoz0wOzAMBgNVHRMEBTADAQH/MCsGA1UdEQQkMCKCCWxvY2FsaG9zdIIJbG9j
YWxob3N0hwQAAAAAhwR/AAABMA0GCSqGSIb3DQEBBQUAA4IBAQCe2A5gDc3jiZwT
a5TJrc2J2KouqxB/PCddw5VY8jPsZJfsr9gxHi+Xa5g8p3oqmEOIlqM5BVhrZRUG
RWHDmL+bCsuzMoA/vGHtHmUIwLeZQLWgT3kv12Dc8M9flNNjmXWxdMR9lOMwcL83
F0CdElxSmaEbNvCIJBDetJJ7vMCqS2lnTLWurbH4ZGeGwvjzNgpgGCKwbyK/gU+j
UXiTQbVvPQ3WWACDnfH6rg0TpxU9jOBkd+4/9tUrBG7UclQBfGULk3sObLO9kx4N
8RxJmtp8jljIXVPX3udExI05pz039pAgvaeZWtP17QSbYcKF1jFtKo6ckrv2GKXX
M5OXGXdw
-----END CERTIFICATE-----

libs/db/remotedb/test.key (+25, -25)

@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB
ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se
qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B
vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu
x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De
qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2
ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi
mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y
TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ
iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc
de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf
Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR
G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W
hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv
TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW
dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ
I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS
hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST
xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl
hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr
vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ
XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs
P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W
//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg
ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ
MIIEogIBAAKCAQEA60S/fNUWoHm1PYI/yrlnZNtrdRqDORHe0hPwl/lttLz7+a7H
zQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWcGjxJL24tVwiOwqYRzTPZ/rK3
JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4Dh/XgWjEt4DhpHwf/zuIK9XkJ
w0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0AdsQCjt1GKcIROkcOGUHqByI
NJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhqHRTCt5UELWs/53Gj1ffNuhjE
COVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQIDAQABAoIBAAb5n8+8pZIWaags
L2X8PzN/Sd1L7u4HOJrz2mM3EuiT3ciWRPgwImpETeJ5UW27Qc+0dTahX5DcuYxE
UErefSZ2ru0cMnNEifWVnF3q/IYf7mudss5bJ9NZYi+Dqdu7mTAXp4xFlHtaALbp
iFK/8wjoBbTHNmKWKK0IHx27Z/sjK+7QnoKij+rRzvhmNyN2r3dT7EO4VePriesr
zyVaGexNPFhtd1HLJLQ5GqRAidtLM4x1ubvp3NLTCvvoQKKYFOg7WqKycZ2VllOg
ApcpZb/kB/sNTacLvum5HgMNWuWwgREISuQJR+esz/5WaSTQ04L2+vMVomGM18X+
9n4KYwECgYEA/Usajzl3tWv1IIairSk9Md7Z2sbaPVBNKv4IDJy3mLwt+2VN2mqo
fpeV5rBaFNWzJR0M0JwLbdlsvSfXgVFkUePg1UiJyFqOKmMO8Bd/nxV9NAewVg1D
KXQLsfrojBfka7HtFmfk/GA2swEMCGzUcY23bwah1JUTLhvbl19GNMECgYEA7chW
Ip/IvYBiaaD/qgklwJE8QoAVzi9zqlI1MOJJNf1r/BTeZ2R8oXlRk8PVxFglliuA
vMgwCkfuqxA8irIdHReLzqcLddPtaHo6R8zKP2cpYBo61C3CPzEAucasaOXQFpjs
DPnp4QFeboNPgiEGLVGHFvD5TwZpideBpWTwud0CgYEAy04MDGfJEQKNJ0VJr4mJ
R80iubqgk1QwDFEILu9fYiWxFrbSTX0Mr0eGlzp3o39/okt17L9DYTGCWTVwgajN
x/kLjsYBaaJdt+H4rHeABTWfYDLHs9pDTTOK65mELGZE/rg6n6BWqMelP/qYKO8J
efeRA3mkTVg2o+zSTea4GEECgYEA3DB4EvgD2/fXKhl8puhxnTDgrHQPvS8T3NTj
jLD/Oo/CP1zT1sqm3qCJelwOyBMYO0dtn2OBmQOjb6VJauYlL5tuS59EbYgigG0v
Ku3pG21cUzH26CS3i+zEz0O6xCiL2WEitaF3gnTSDWRrbAVIww6MGiJru1IkyRBX
beFbScECf1n00W9qrXnqsWefk73ucggfV0gQQmDnauMA9J7B96+MvGprE54Tx9vl
SBodgvJsCod9Y9Q7QsMcXb4CuEgTgWKDBp5cA/KUOQmK5buOrysosLnnm12LaHiF
O7IIh8Cmb9TbdldgW+8ndZ4EQ3lfIS0zN3/7rWD34bs19JDYkRY=
-----END RSA PRIVATE KEY-----

+ 2
- 2
lite/client/provider_test.go View File

@ -19,8 +19,7 @@ func TestMain(m *testing.M) {
code := m.Run()
node.Stop()
node.Wait()
rpctest.StopTendermint(node)
os.Exit(code)
}
@ -28,6 +27,7 @@ func TestProvider(t *testing.T) {
assert, require := assert.New(t), require.New(t)
cfg := rpctest.GetConfig()
defer os.RemoveAll(cfg.RootDir)
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {


+ 1
- 2
lite/proxy/query_test.go View File

@ -32,8 +32,7 @@ func TestMain(m *testing.M) {
code := m.Run()
node.Stop()
node.Wait()
rpctest.StopTendermint(node)
os.Exit(code)
}


+ 2
- 1
mempool/bench_test.go View File

@ -11,7 +11,8 @@ import (
func BenchmarkReap(b *testing.B) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
size := 10000
for i := 0; i < size; i++ {


+ 22
- 10
mempool/mempool_test.go View File

@ -1,8 +1,8 @@
package mempool
import (
"crypto/md5"
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"fmt"
"io/ioutil"
@ -25,7 +25,11 @@ import (
"github.com/tendermint/tendermint/types"
)
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()
func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, cleanupFunc) {
config := cfg.ResetTestRoot("mempool_test")
appConnMem, _ := cc.NewABCIClient()
@ -36,7 +40,7 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
}
mempool := NewMempool(config.Mempool, appConnMem, 0)
mempool.SetLogger(log.TestingLogger())
return mempool
return mempool, func() { os.RemoveAll(config.RootDir) }
}
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@ -82,7 +86,8 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
func TestReapMaxBytesMaxGas(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
// Ensure gas calculation behaves as expected
checkTxs(t, mempool, 1)
@ -130,7 +135,8 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
func TestMempoolFilters(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
emptyTxArr := []types.Tx{[]byte{}}
nopPreFilter := func(tx types.Tx) error { return nil }
@ -168,7 +174,8 @@ func TestMempoolFilters(t *testing.T) {
func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
err := mempool.CheckTx([]byte{0x01}, nil)
if assert.Error(t, err) {
@ -179,7 +186,8 @@ func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
func TestTxsAvailable(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool.EnableTxsAvailable()
timeoutMS := 500
@ -224,7 +232,9 @@ func TestSerialReap(t *testing.T) {
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
err := appConnCon.Start()
@ -364,6 +374,7 @@ func TestMempoolCloseWAL(t *testing.T) {
// 3. Create the mempool
wcfg := cfg.DefaultMempoolConfig()
wcfg.RootDir = rootDir
defer os.RemoveAll(wcfg.RootDir)
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
@ -406,7 +417,8 @@ func txMessageSize(tx types.Tx) int {
func TestMempoolMaxMsgSize(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempl := newMempoolWithApp(cc)
mempl, cleanup := newMempoolWithApp(cc)
defer cleanup()
testCases := []struct {
len int
@ -451,7 +463,7 @@ func TestMempoolMaxMsgSize(t *testing.T) {
}
func checksumIt(data []byte) string {
h := md5.New()
h := sha256.New()
h.Write(data)
return fmt.Sprintf("%x", h.Sum(nil))
}
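
The newMempoolWithApp change above returns a cleanup function alongside the mempool so every caller can remove the config files the test created. A minimal stand-alone sketch of the same pattern, with a hypothetical newTempDir helper standing in for the real constructor:

package mempool_example

import (
	"io/ioutil"
	"os"
	"testing"
)

// newTempDir mirrors the returned-cleanup pattern: the constructor hands
// back the resource together with a function that removes whatever it
// created on disk. newTempDir itself is illustrative only.
func newTempDir(t *testing.T) (string, func()) {
	dir, err := ioutil.TempDir("", "mempool_test")
	if err != nil {
		t.Fatal(err)
	}
	return dir, func() { os.RemoveAll(dir) }
}

func TestUsesTempDir(t *testing.T) {
	dir, cleanup := newTempDir(t)
	defer cleanup() // runs even if the test fails below
	_ = dir
}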


+ 2
- 1
mempool/reactor_test.go View File

@ -49,7 +49,8 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
for i := 0; i < N; i++ {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we don't start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
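
Worth noting for the hunk above: a defer inside a loop does not fire per iteration; all deferred calls stack up and run, LIFO, when the enclosing function returns. A tiny stand-alone illustration of that behavior:

package main

import "fmt"

func main() {
	for i := 0; i < 3; i++ {
		defer fmt.Println("cleanup", i) // runs at main's return: 2, 1, 0
	}
	fmt.Println("body done")
}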


+ 5
- 0
node/node.go View File

@ -793,6 +793,11 @@ func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp
}
// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
return n.config
}
//------------------------------------------------------------------------------
func (n *Node) Listeners() []string {


+ 7
- 0
node/node_test.go View File

@ -31,6 +31,7 @@ import (
func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
defer os.RemoveAll(config.RootDir)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@ -90,6 +91,7 @@ func TestSplitAndTrimEmpty(t *testing.T) {
func TestNodeDelayedStart(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_start_test")
defer os.RemoveAll(config.RootDir)
now := tmtime.Now()
// create & start node
@ -104,6 +106,7 @@ func TestNodeDelayedStart(t *testing.T) {
func TestNodeSetAppVersion(t *testing.T) {
config := cfg.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(config.RootDir)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@ -124,6 +127,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
addr := "tcp://" + testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = addr
dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
@ -153,6 +157,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
addrNoPrefix := testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix
_, err := DefaultNewNode(config, log.TestingLogger())
@ -164,6 +169,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
defer os.Remove(tmpfile) // clean up
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile
dialer := privval.DialUnixFn(tmpfile)
@ -200,6 +206,7 @@ func testFreeAddr(t *testing.T) string {
// mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()


+ 4
- 5
p2p/conn/secret_connection_test.go View File

@ -238,6 +238,10 @@ func TestSecretConnectionReadWrite(t *testing.T) {
for {
n, err := nodeSecretConn.Read(readBuffer)
if err == io.EOF {
if err := nodeConn.PipeReader.Close(); err != nil {
t.Error(err)
return nil, err, true
}
return nil, nil, false
} else if err != nil {
t.Errorf("Failed to read from nodeSecretConn: %v", err)
@ -245,11 +249,6 @@ func TestSecretConnectionReadWrite(t *testing.T) {
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
}
if err := nodeConn.PipeReader.Close(); err != nil {
t.Error(err)
return nil, err, true
}
return nil, nil, false
},
)
assert.True(t, ok, "Unexpected task abortion")


+ 24
- 6
p2p/transport.go View File

@ -194,7 +194,7 @@ func (mt *MultiplexTransport) Dial(
return nil, err
}
secretConn, nodeInfo, err := mt.upgrade(c)
secretConn, nodeInfo, err := mt.upgrade(c, &addr)
if err != nil {
return nil, err
}
@ -262,7 +262,7 @@ func (mt *MultiplexTransport) acceptPeers() {
err := mt.filterConn(c)
if err == nil {
secretConn, nodeInfo, err = mt.upgrade(c)
secretConn, nodeInfo, err = mt.upgrade(c, nil)
}
select {
@ -279,9 +279,9 @@ func (mt *MultiplexTransport) acceptPeers() {
// Cleanup removes the given address from the connections set and
// closes the connection.
func (mt *MultiplexTransport) Cleanup(peer Peer) {
mt.conns.RemoveAddr(peer.RemoteAddr())
_ = peer.CloseConn()
func (mt *MultiplexTransport) Cleanup(p Peer) {
mt.conns.RemoveAddr(p.RemoteAddr())
_ = p.CloseConn()
}
func (mt *MultiplexTransport) cleanup(c net.Conn) error {
@ -335,6 +335,7 @@ func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) {
func (mt *MultiplexTransport) upgrade(
c net.Conn,
dialedAddr *NetAddress,
) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) {
defer func() {
if err != nil {
@ -351,6 +352,23 @@ func (mt *MultiplexTransport) upgrade(
}
}
// For outgoing conns, ensure connection key matches dialed key.
connID := PubKeyToID(secretConn.RemotePubKey())
if dialedAddr != nil {
if dialedID := dialedAddr.ID; connID != dialedID {
return nil, nil, ErrRejected{
conn: c,
id: connID,
err: fmt.Errorf(
"conn.ID (%v) dialed ID (%v) missmatch",
connID,
dialedID,
),
isAuthFailure: true,
}
}
}
nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo)
if err != nil {
return nil, nil, ErrRejected{
@ -369,7 +387,7 @@ func (mt *MultiplexTransport) upgrade(
}
// Ensure connection key matches self reported key.
if connID := PubKeyToID(secretConn.RemotePubKey()); connID != nodeInfo.ID() {
if connID != nodeInfo.ID() {
return nil, nil, ErrRejected{
conn: c,
id: connID,
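
The upgrade change above pins outgoing connections to the ID that was dialed. A hedged sketch of how a caller exercises that path (same package; the dialExpecting helper is invented for illustration): the expected ID travels inside the address string, and Dial returns ErrRejected with IsAuthFailure() when the remote key hashes to a different ID.

package p2p

import "testing"

// dialExpecting is an illustrative helper (not part of this commit): it
// embeds the expected node ID in the dialed address so upgrade() can
// compare it against PubKeyToID(secretConn.RemotePubKey()).
func dialExpecting(t *testing.T, mt *MultiplexTransport, expectedID ID, hostPort string) Peer {
	addr, err := NewNetAddressStringWithOptionalID(IDAddressString(expectedID, hostPort))
	if err != nil {
		t.Fatal(err)
	}
	p, err := mt.Dial(*addr, peerConfig{})
	if err != nil {
		// an ID mismatch surfaces as ErrRejected with IsAuthFailure() == true
		t.Fatalf("dial rejected: %v", err)
	}
	return p
}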


+ 44
- 15
p2p/transport_test.go View File

@ -160,8 +160,7 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
},
)
)
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -230,7 +229,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
// Simulate slow Peer.
go func() {
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -281,8 +280,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
},
)
)
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -328,7 +326,7 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
)
)
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -371,8 +369,7 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
PrivKey: ed25519.GenPrivKey(),
},
)
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -401,6 +398,38 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
}
}
func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
mt := testSetupMultiplexTransport(t)
var (
pv = ed25519.GenPrivKey()
dialer = newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
NodeKey{
PrivKey: pv,
},
)
)
wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(wrongID, mt.listener.Addr().String()))
if err != nil {
t.Fatalf("invalid address with ID: %v", err)
}
_, err = dialer.Dial(*addr, peerConfig{})
if err != nil {
t.Logf("connection failed: %v", err)
if err, ok := err.(ErrRejected); ok {
if !err.IsAuthFailure() {
t.Errorf("expected auth failure")
}
} else {
t.Errorf("expected ErrRejected")
}
}
}
func TestTransportMultiplexRejectIncompatible(t *testing.T) {
mt := testSetupMultiplexTransport(t)
@ -416,8 +445,7 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) {
},
)
)
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -448,7 +476,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
errc := make(chan error)
go func() {
addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
if err != nil {
errc <- err
return
@ -466,7 +494,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
if err := <-errc; err != nil {
if err, ok := err.(ErrRejected); ok {
if !err.IsSelf() {
t.Errorf("expected to reject self")
t.Errorf("expected to reject self, got: %v", err)
}
} else {
t.Errorf("expected ErrRejected")
@ -478,7 +506,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
_, err := mt.Accept(peerConfig{})
if err, ok := err.(ErrRejected); ok {
if !err.IsSelf() {
t.Errorf("expected to reject self")
t.Errorf("expected to reject self, got: %v", err)
}
} else {
t.Errorf("expected ErrRejected")
@ -566,9 +594,10 @@ func TestTransportHandshake(t *testing.T) {
func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
var (
pv = ed25519.GenPrivKey()
id = PubKeyToID(pv.PubKey())
mt = newMultiplexTransport(
testNodeInfo(
PubKeyToID(pv.PubKey()), "transport",
id, "transport",
),
NodeKey{
PrivKey: pv,
@ -576,7 +605,7 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
)
)
addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0")
addr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}


+ 0
- 1
privval/file.go View File

@ -31,7 +31,6 @@ func voteToStep(vote *types.Vote) int8 {
return stepPrecommit
default:
panic("Unknown vote type")
return 0
}
}


+ 1
- 1
privval/server.go View File

@ -67,7 +67,7 @@ func DialTCPFn(addr string, connTimeout time.Duration, privKey ed25519.PrivKeyEd
// DialUnixFn dials the given unix socket.
func DialUnixFn(addr string) Dialer {
return func() (net.Conn, error) {
unixAddr := &net.UnixAddr{addr, "unix"}
unixAddr := &net.UnixAddr{Name: addr, Net: "unix"}
return net.DialUnix("unix", nil, unixAddr)
}
}


+ 2
- 2
rpc/client/main_test.go View File

@ -15,10 +15,10 @@ func TestMain(m *testing.M) {
// start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
node = rpctest.StartTendermint(app)
code := m.Run()
// and shut down properly at the end
node.Stop()
node.Wait()
rpctest.StopTendermint(node)
os.Exit(code)
}

+ 6
- 6
rpc/client/mock/abci.go View File

@ -23,7 +23,7 @@ var (
)
func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return &ctypes.ResultABCIInfo{a.App.Info(proxy.RequestInfo)}, nil
return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
}
func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
@ -37,7 +37,7 @@ func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clien
Height: opts.Height,
Prove: opts.Prove,
})
return &ctypes.ResultABCIQuery{q}, nil
return &ctypes.ResultABCIQuery{Response: q}, nil
}
// NOTE: Caller should call a.App.Commit() separately,
@ -60,7 +60,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
if !c.IsErr() {
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
}
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
@ -69,7 +69,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
if !c.IsErr() {
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
}
// ABCIMock will send all abci related request to the named app,
@ -87,7 +87,7 @@ func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
}
func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
@ -100,7 +100,7 @@ func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clie
return nil, err
}
resQuery := res.(abci.ResponseQuery)
return &ctypes.ResultABCIQuery{resQuery}, nil
return &ctypes.ResultABCIQuery{Response: resQuery}, nil
}
func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
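
The mock changes above, and the rpc/core ones below, replace unkeyed composite literals with named fields. A self-contained sketch of why, using stand-in types: keyed literals stay valid when fields are added or reordered, and go vet's composites check flags unkeyed literals for structs from other packages.

package main

import "fmt"

type resultBroadcastTx struct {
	Code uint32
	Log  string
}

func main() {
	// keyed: robust to field reordering and new fields
	r := resultBroadcastTx{Code: 0, Log: "ok"}
	// the unkeyed form resultBroadcastTx{0, "ok"} breaks silently
	// (or stops compiling) when the struct definition changes
	fmt.Println(r)
}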


+ 1
- 1
rpc/client/rpc_test.go View File

@ -42,9 +42,9 @@ func TestCorsEnabled(t *testing.T) {
req.Header.Set("Origin", origin)
c := &http.Client{}
resp, err := c.Do(req)
require.Nil(t, err, "%+v", err)
defer resp.Body.Close()
require.Nil(t, err, "%+v", err)
assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin)
}


+ 2
- 2
rpc/core/abci.go View File

@ -63,7 +63,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype
return nil, err
}
logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery)
return &ctypes.ResultABCIQuery{*resQuery}, nil
return &ctypes.ResultABCIQuery{Response: *resQuery}, nil
}
// Get some info about the application.
@ -101,5 +101,5 @@ func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{*resInfo}, nil
return &ctypes.ResultABCIInfo{Response: *resInfo}, nil
}

+ 4
- 2
rpc/core/blocks.go View File

@ -85,7 +85,9 @@ func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, e
blockMetas = append(blockMetas, blockMeta)
}
return &ctypes.ResultBlockchainInfo{blockStore.Height(), blockMetas}, nil
return &ctypes.ResultBlockchainInfo{
LastHeight: blockStore.Height(),
BlockMetas: blockMetas}, nil
}
// error if either min or max are negative or min < max
@ -233,7 +235,7 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) {
blockMeta := blockStore.LoadBlockMeta(height)
block := blockStore.LoadBlock(height)
return &ctypes.ResultBlock{blockMeta, block}, nil
return &ctypes.ResultBlock{BlockMeta: blockMeta, Block: block}, nil
}
// Get block commit at a given height.


+ 10
- 4
rpc/core/consensus.go View File

@ -60,7 +60,9 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
if err != nil {
return nil, err
}
return &ctypes.ResultValidators{height, validators.Validators}, nil
return &ctypes.ResultValidators{
BlockHeight: height,
Validators: validators.Validators}, nil
}
// DumpConsensusState dumps consensus state.
@ -223,7 +225,9 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
if err != nil {
return nil, err
}
return &ctypes.ResultDumpConsensusState{roundState, peerStates}, nil
return &ctypes.ResultDumpConsensusState{
RoundState: roundState,
Peers: peerStates}, nil
}
// ConsensusState returns a concise summary of the consensus state.
@ -276,7 +280,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
func ConsensusState() (*ctypes.ResultConsensusState, error) {
// Get self round state.
bz, err := consensusState.GetRoundStateSimpleJSON()
return &ctypes.ResultConsensusState{bz}, err
return &ctypes.ResultConsensusState{RoundState: bz}, err
}
// Get the consensus parameters at the given block height.
@ -327,5 +331,7 @@ func ConsensusParams(heightPtr *int64) (*ctypes.ResultConsensusParams, error) {
if err != nil {
return nil, err
}
return &ctypes.ResultConsensusParams{BlockHeight: height, ConsensusParams: consensusparams}, nil
return &ctypes.ResultConsensusParams{
BlockHeight: height,
ConsensusParams: consensusparams}, nil
}

+ 1
- 1
rpc/core/events.go View File

@ -109,7 +109,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri
go func() {
for event := range ch {
tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)}
tmResult := &ctypes.ResultEvent{Query: query, Data: event.(tmtypes.TMEventData)}
wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)), tmResult))
}
}()


+ 1
- 1
rpc/core/mempool.go View File

@ -275,7 +275,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
limit = validatePerPage(limit)
txs := mempool.ReapMaxTxs(limit)
return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
return &ctypes.ResultUnconfirmedTxs{N: len(txs), Txs: txs}, nil
}
// Get number of unconfirmed transactions.


+ 129
- 17
rpc/core/net.go View File

@ -29,21 +29,133 @@ import (
//
// ```json
// {
// "error": "",
// "result": {
// "n_peers": "0",
// "peers": [],
// "listeners": [
// "Listener(@10.0.2.15:26656)"
// ],
// "listening": true
// },
// "id": "",
// "jsonrpc": "2.0"
// }
// "jsonrpc": "2.0",
// "id": "",
// "result": {
// "listening": true,
// "listeners": [
// "Listener(@)"
// ],
// "n_peers": "3",
// "peers": [
// {
// "node_info": {
// "protocol_version": {
// "p2p": "7",
// "block": "8",
// "app": "1"
// },
// "id": "93529da3435c090d02251a050342b6a488d4ab56",
// "listen_addr": "tcp://0.0.0.0:26656",
// "network": "chain-RFo6qC",
// "version": "0.30.0",
// "channels": "4020212223303800",
// "moniker": "fc89e4ed23f2",
// "other": {
// "tx_index": "on",
// "rpc_address": "tcp://0.0.0.0:26657"
// }
// },
// "is_outbound": true,
// "connection_status": {
// "Duration": "3475230558",
// "SendMonitor": {
// "Active": true,
// "Start": "2019-02-14T12:40:47.52Z",
// "Duration": "3480000000",
// "Idle": "240000000",
// "Bytes": "4512",
// "Samples": "9",
// "InstRate": "1338",
// "CurRate": "2046",
// "AvgRate": "1297",
// "PeakRate": "6570",
// "BytesRem": "0",
// "TimeRem": "0",
// "Progress": 0
// },
// "RecvMonitor": {
// "Active": true,
// "Start": "2019-02-14T12:40:47.52Z",
// "Duration": "3480000000",
// "Idle": "280000000",
// "Bytes": "4489",
// "Samples": "10",
// "InstRate": "1821",
// "CurRate": "1663",
// "AvgRate": "1290",
// "PeakRate": "5512",
// "BytesRem": "0",
// "TimeRem": "0",
// "Progress": 0
// },
// "Channels": [
// {
// "ID": 48,
// "SendQueueCapacity": "1",
// "SendQueueSize": "0",
// "Priority": "5",
// "RecentlySent": "0"
// },
// {
// "ID": 64,
// "SendQueueCapacity": "1000",
// "SendQueueSize": "0",
// "Priority": "10",
// "RecentlySent": "14"
// },
// {
// "ID": 32,
// "SendQueueCapacity": "100",
// "SendQueueSize": "0",
// "Priority": "5",
// "RecentlySent": "619"
// },
// {
// "ID": 33,
// "SendQueueCapacity": "100",
// "SendQueueSize": "0",
// "Priority": "10",
// "RecentlySent": "1363"
// },
// {
// "ID": 34,
// "SendQueueCapacity": "100",
// "SendQueueSize": "0",
// "Priority": "5",
// "RecentlySent": "2145"
// },
// {
// "ID": 35,
// "SendQueueCapacity": "2",
// "SendQueueSize": "0",
// "Priority": "1",
// "RecentlySent": "0"
// },
// {
// "ID": 56,
// "SendQueueCapacity": "1",
// "SendQueueSize": "0",
// "Priority": "5",
// "RecentlySent": "0"
// },
// {
// "ID": 0,
// "SendQueueCapacity": "10",
// "SendQueueSize": "0",
// "Priority": "1",
// "RecentlySent": "10"
// }
// ]
// },
// "remote_ip": "192.167.10.3"
// },
// ...
// }
// ```
func NetInfo() (*ctypes.ResultNetInfo, error) {
peers := []ctypes.Peer{}
out, in, _ := p2pPeers.NumPeers()
peers := make([]ctypes.Peer, 0, out+in)
for _, peer := range p2pPeers.Peers().List() {
nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo)
if !ok {
@ -53,7 +165,7 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
NodeInfo: nodeInfo,
IsOutbound: peer.IsOutbound(),
ConnectionStatus: peer.Status(),
RemoteIP: peer.RemoteIP(),
RemoteIP: peer.RemoteIP().String(),
})
}
// TODO: Should we include PersistentPeers and Seeds in here?
@ -77,7 +189,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
if err != nil {
return &ctypes.ResultDialSeeds{}, err
}
return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
}
func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
@ -90,7 +202,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers,
if err != nil {
return &ctypes.ResultDialPeers{}, err
}
return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
}
// Get genesis file.
@ -136,5 +248,5 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers,
// }
// ```
func Genesis() (*ctypes.ResultGenesis, error) {
return &ctypes.ResultGenesis{genDoc}, nil
return &ctypes.ResultGenesis{Genesis: genDoc}, nil
}
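
Besides the large doc-comment update, NetInfo above now sizes its peers slice with the known peer count. A stand-alone illustration of that make([]T, 0, n) idiom:

package main

import "fmt"

func main() {
	out, in := 2, 1
	// capacity is known up front, so the appends below never reallocate
	peers := make([]string, 0, out+in)
	for _, p := range []string{"p1", "p2", "p3"} {
		peers = append(peers, p)
	}
	fmt.Println(len(peers), cap(peers)) // 3 3
}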

+ 1
- 2
rpc/core/types/responses.go View File

@ -2,7 +2,6 @@ package core_types
import (
"encoding/json"
"net"
"time"
abci "github.com/tendermint/tendermint/abci/types"
@ -111,7 +110,7 @@ type Peer struct {
NodeInfo p2p.DefaultNodeInfo `json:"node_info"`
IsOutbound bool `json:"is_outbound"`
ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
RemoteIP net.IP `json:"remote_ip"`
RemoteIP string `json:"remote_ip"`
}
// Validators for a height


+ 2
- 2
rpc/grpc/grpc_test.go View File

@ -16,11 +16,11 @@ func TestMain(m *testing.M) {
// start a tendermint node in the background to test against
app := kvstore.NewKVStoreApplication()
node := rpctest.StartTendermint(app)
code := m.Run()
// and shut down properly at the end
node.Stop()
node.Wait()
rpctest.StopTendermint(node)
os.Exit(code)
}


+ 8
- 0
rpc/test/helpers.go View File

@ -116,6 +116,14 @@ func StartTendermint(app abci.Application) *nm.Node {
return node
}
// StopTendermint stops a test tendermint server, waits until it's stopped and
// cleans up test/config files.
func StopTendermint(node *nm.Node) {
node.Stop()
node.Wait()
os.RemoveAll(node.Config().RootDir)
}
// NewTendermint creates a new tendermint server and sleeps forever
func NewTendermint(app abci.Application) *nm.Node {
// Create & start node


+ 1
- 1
state/execution.go View File

@ -446,7 +446,7 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty
})
for i, tx := range block.Data.Txs {
eventBus.PublishEventTx(types.EventDataTx{types.TxResult{
eventBus.PublishEventTx(types.EventDataTx{TxResult: types.TxResult{
Height: block.Height,
Index: uint32(i),
Tx: tx,


+ 27
- 0
state/execution_test.go View File

@ -354,6 +354,33 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
}
}
// TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that
// would result in an empty set causes no panic; an error is raised and NextValidators is not updated.
func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(1, 1)
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, MockEvidencePool{})
block := makeBlock(state, 1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
// Remove the only validator
app.ValidatorUpdates = []abci.ValidatorUpdate{
{PubKey: types.TM2PB.PubKey(state.Validators.Validators[0].PubKey), Power: 0},
}
assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) })
assert.NotNil(t, err)
assert.NotEmpty(t, state.NextValidators.Validators)
}
//----------------------------------------------------------------------------
// make some bogus txs
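
The new test above leans on a pattern worth spelling out: assert.NotPanics wraps the call so a panic fails the test instead of crashing it, and the error is captured through the closure for follow-up assertions. A self-contained sketch, with apply standing in for blockExec.ApplyBlock:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// apply is a stand-in for the ApplyBlock call in the test above.
func apply() error { return errors.New("validator set cannot be empty") }

func TestNoPanicOnEmptySet(t *testing.T) {
	var err error
	assert.NotPanics(t, func() { err = apply() }) // a panic fails, not crashes
	assert.Error(t, err)                          // the error is still surfaced
}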


+ 6
- 5
state/state_test.go View File

@ -5,6 +5,7 @@ import (
"fmt"
"math"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
@ -28,7 +29,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) {
state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
tearDown := func(t *testing.T) {}
tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
return tearDown, stateDB, state
}
@ -313,12 +314,12 @@ func TestProposerFrequency(t *testing.T) {
}
}
// some random test cases with up to 300 validators
// some random test cases with up to 100 validators
maxVals := 100
maxPower := 1000
nTestCases := 5
for i := 0; i < nTestCases; i++ {
N := cmn.RandInt() % maxVals
N := cmn.RandInt()%maxVals + 1
vals := make([]*types.Validator, N)
totalVotePower := int64(0)
for j := 0; j < N; j++ {
@ -802,10 +803,10 @@ func TestLargeGenesisValidator(t *testing.T) {
func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
const valSetSize = 2
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
defer tearDown(t)
nextHeight := state.LastBlockHeight + 1
@ -825,11 +826,11 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
func TestManyValidatorChangesSaveLoad(t *testing.T) {
const valSetSize = 7
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
defer tearDown(t)
_, valOld := state.Validators.GetByIndex(0)
var pubkeyOld = valOld.PubKey
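
The two hunks above move defer tearDown(t) up so the cleanup is registered immediately after setupTestCase; any later failure in SaveState, an assertion, or a panic still triggers it. A minimal illustration:

package main

import "fmt"

func main() {
	tearDown := func() { fmt.Println("teardown ran") }
	// register the cleanup before any step that can fail or panic
	defer tearDown()
	fmt.Println("mutate and save state")
}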


+ 8
- 5
tools/tm-bench/transacter.go View File

@ -1,11 +1,14 @@
package main
import (
"crypto/md5"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
// it is ok to use math/rand here: we do not need a cryptographically secure random
// number generator here and we can run the tests a bit faster
"math/rand"
"net"
"net/http"
@ -154,12 +157,12 @@ func (t *transacter) sendLoop(connIndex int) {
}()
// hash of the host name is a part of each tx
var hostnameHash [md5.Size]byte
var hostnameHash [sha256.Size]byte
hostname, err := os.Hostname()
if err != nil {
hostname = "127.0.0.1"
}
hostnameHash = md5.Sum([]byte(hostname))
hostnameHash = sha256.Sum256([]byte(hostname))
// each transaction embeds connection index, tx number and hash of the hostname
// we update the tx number between successive txs
tx := generateTx(connIndex, txNumber, t.Size, hostnameHash)
@ -257,7 +260,7 @@ func connect(host string) (*websocket.Conn, *http.Response, error) {
return websocket.DefaultDialer.Dial(u.String(), nil)
}
func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [md5.Size]byte) []byte {
func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [sha256.Size]byte) []byte {
tx := make([]byte, txSize)
binary.PutUvarint(tx[:8], uint64(connIndex))
@ -266,7 +269,7 @@ func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [md5.Size]
binary.PutUvarint(tx[32:40], uint64(time.Now().Unix()))
// 40-* random data
if _, err := rand.Read(tx[40:]); err != nil {
if _, err := rand.Read(tx[40:]); err != nil { //nolint: gosec
panic(errors.Wrap(err, "failed to read random bytes"))
}
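
The md5-to-sha256 swap above works because both hash packages expose a fixed-size digest array; only the size changes (16 to 32 bytes), so the hostnameHash variable and the generateTx signature move to [sha256.Size]byte. A stand-alone illustration:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	var hostnameHash [sha256.Size]byte // sha256.Size == 32
	hostnameHash = sha256.Sum256([]byte("127.0.0.1"))
	fmt.Printf("%x\n", hostnameHash[:8]) // first 8 bytes of the digest
}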


+ 3
- 3
tools/tm-bench/transacter_test.go View File

@ -1,7 +1,7 @@
package main
import (
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
@ -28,7 +28,7 @@ func TestGenerateTxUpdateTxConsistentency(t *testing.T) {
}
for tcIndex, tc := range cases {
hostnameHash := md5.Sum([]byte(tc.hostname))
hostnameHash := sha256.Sum256([]byte(tc.hostname))
// Tx generated from update tx. This is defined outside of the loop, since we have
// to a have something initially to update
updatedTx := generateTx(tc.connIndex, tc.startingTxNumber, tc.txSize, hostnameHash)
@ -69,7 +69,7 @@ func BenchmarkIterationOfSendLoop(b *testing.B) {
// something too far away to matter
endTime := now.Add(time.Hour)
txNumber := 0
hostnameHash := md5.Sum([]byte{0})
hostnameHash := sha256.Sum256([]byte{0})
tx := generateTx(connIndex, txNumber, txSize, hostnameHash)
txHex := make([]byte, len(tx)*2)
hex.Encode(txHex, tx)


+ 2
- 2
tools/tm-monitor/monitor/node.go View File

@ -21,8 +21,8 @@ const maxRestarts = 25
type Node struct {
rpcAddr string
IsValidator bool `json:"is_validator"` // validator or non-validator?
pubKey crypto.PubKey `json:"pub_key"`
IsValidator bool `json:"is_validator"` // validator or non-validator?
pubKey crypto.PubKey
Name string `json:"name"`
Online bool `json:"online"`


+ 4
- 2
types/block_test.go View File

@ -1,6 +1,8 @@
package types
import (
// it is ok to use math/rand here: we do not need a cryptographically secure random
// number generator here and we can run the tests a bit faster
"crypto/rand"
"math"
"os"
@ -162,8 +164,8 @@ func TestBlockString(t *testing.T) {
func makeBlockIDRandom() BlockID {
blockHash := make([]byte, tmhash.Size)
partSetHash := make([]byte, tmhash.Size)
rand.Read(blockHash)
rand.Read(partSetHash)
rand.Read(blockHash) //nolint: gosec
rand.Read(partSetHash) //nolint: gosec
blockPartsHeader := PartSetHeader{123, partSetHash}
return BlockID{blockHash, blockPartsHeader}
}


+ 159
- 58
types/validator_set_test.go View File

@ -658,88 +658,189 @@ type testVal struct {
power int64
}
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
valSetUpdatesBasicTests := []struct {
startVals []testVal
updateVals []testVal
expectedVals []testVal
expError bool
}{
// Operations that should result in error
0: { // updates leading to overflows
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", math.MaxInt64}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
1: { // duplicate entries in changes
[]testVal{{"v1", 10}, {"v2", 10}},
func testValSet(nVals int, power int64) []testVal {
vals := make([]testVal, nVals)
for i := 0; i < nVals; i++ {
vals[i] = testVal{fmt.Sprintf("v%d", i+1), power}
}
return vals
}
type valSetErrTestCase struct {
startVals []testVal
updateVals []testVal
}
func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) {
// create a new set and apply updates, keeping copies for the checks
valSet := createNewValidatorSet(tt.startVals)
valSetCopy := valSet.Copy()
valList := createNewValidatorList(tt.updateVals)
valListCopy := validatorListCopy(valList)
err := valSet.UpdateWithChangeSet(valList)
// for errors check the validator set has not been changed
assert.Error(t, err, "test %d", idx)
assert.Equal(t, valSet, valSetCopy, "test %v", idx)
// check the parameter list has not changed
assert.Equal(t, valList, valListCopy, "test %v", idx)
}
func TestValSetUpdatesDuplicateEntries(t *testing.T) {
testCases := []valSetErrTestCase{
// Duplicate entries in changes
{ // first entry is duplicated change
testValSet(2, 10),
[]testVal{{"v1", 11}, {"v1", 22}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
2: { // duplicate entries in removes
[]testVal{{"v1", 10}, {"v2", 10}},
},
{ // second entry is duplicated change
testValSet(2, 10),
[]testVal{{"v2", 11}, {"v2", 22}},
},
{ // change duplicates are separated by a valid change
testValSet(2, 10),
[]testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}},
},
{ // change duplicates are separated by a valid change
testValSet(3, 10),
[]testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}},
},
// Duplicate entries in remove
{ // first entry is duplicated remove
testValSet(2, 10),
[]testVal{{"v1", 0}, {"v1", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
3: { // duplicate entries in removes + changes
[]testVal{{"v1", 10}, {"v2", 10}},
},
{ // second entry is duplicated remove
testValSet(2, 10),
[]testVal{{"v2", 0}, {"v2", 0}},
},
{ // remove duplicates are separated by a valid remove
testValSet(2, 10),
[]testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}},
},
{ // remove duplicates are separated by a valid remove
testValSet(3, 10),
[]testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}},
},
{ // remove and update same val
testValSet(2, 10),
[]testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}},
},
{ // duplicate entries in removes + changes
testValSet(2, 10),
[]testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
4: { // update with negative voting power
[]testVal{{"v1", 10}, {"v2", 10}},
},
{ // duplicate entries in removes + changes
testValSet(3, 10),
[]testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
},
}
for i, tt := range testCases {
executeValSetErrTestCase(t, i, tt)
}
}
func TestValSetUpdatesOverflows(t *testing.T) {
maxVP := MaxTotalVotingPower
testCases := []valSetErrTestCase{
{ // single update leading to overflow
testValSet(2, 10),
[]testVal{{"v1", math.MaxInt64}},
},
{ // single update leading to overflow
testValSet(2, 10),
[]testVal{{"v2", math.MaxInt64}},
},
{ // add validator leading to exceed Max
testValSet(1, maxVP-1),
[]testVal{{"v2", 5}},
},
{ // add validator leading to exceed Max
testValSet(2, maxVP/3),
[]testVal{{"v3", maxVP / 2}},
},
}
for i, tt := range testCases {
executeValSetErrTestCase(t, i, tt)
}
}
func TestValSetUpdatesOtherErrors(t *testing.T) {
testCases := []valSetErrTestCase{
{ // update with negative voting power
testValSet(2, 10),
[]testVal{{"v1", -123}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
5: { // delete non existing validator
[]testVal{{"v1", 10}, {"v2", 10}},
},
{ // update with negative voting power
testValSet(2, 10),
[]testVal{{"v2", -123}},
},
{ // remove non-existing validator
testValSet(2, 10),
[]testVal{{"v3", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
},
{ // delete all validators
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
[]testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
},
}
for i, tt := range testCases {
executeValSetErrTestCase(t, i, tt)
}
}
// Operations that should be successful
6: { // no changes
[]testVal{{"v1", 10}, {"v2", 10}},
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
valSetUpdatesBasicTests := []struct {
startVals []testVal
updateVals []testVal
expectedVals []testVal
}{
{ // no changes
testValSet(2, 10),
[]testVal{},
[]testVal{{"v1", 10}, {"v2", 10}},
false},
7: { // voting power changes
[]testVal{{"v1", 10}, {"v2", 10}},
testValSet(2, 10),
},
{ // voting power changes
testValSet(2, 10),
[]testVal{{"v1", 11}, {"v2", 22}},
[]testVal{{"v1", 11}, {"v2", 22}},
false},
8: { // add new validators
},
{ // add new validators
[]testVal{{"v1", 10}, {"v2", 20}},
[]testVal{{"v3", 30}, {"v4", 40}},
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}},
false},
9: { // delete validators
},
{ // add new validator to middle
[]testVal{{"v1", 10}, {"v3", 20}},
[]testVal{{"v2", 30}},
[]testVal{{"v1", 10}, {"v2", 30}, {"v3", 20}},
},
{ // add new validator to beginning
[]testVal{{"v2", 10}, {"v3", 20}},
[]testVal{{"v1", 30}},
[]testVal{{"v1", 30}, {"v2", 10}, {"v3", 20}},
},
{ // delete validators
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
[]testVal{{"v2", 0}},
[]testVal{{"v1", 10}, {"v3", 30}},
false},
10: { // delete all validators
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
[]testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
true},
},
}
for i, tt := range valSetUpdatesBasicTests {
// create a new set and apply updates, keeping copies for the checks
valSet := createNewValidatorSet(tt.startVals)
valSetCopy := valSet.Copy()
valList := createNewValidatorList(tt.updateVals)
valListCopy := validatorListCopy(valList)
err := valSet.UpdateWithChangeSet(valList)
assert.NoError(t, err, "test %d", i)
if tt.expError {
// for errors check the validator set has not been changed
assert.Error(t, err, "test %d", i)
assert.Equal(t, valSet, valSetCopy, "test %v", i)
} else {
assert.NoError(t, err, "test %d", i)
}
// check the parameter list has not changed
assert.Equal(t, valList, valListCopy, "test %v", i)


+ 20
- 19
types/vote_test.go View File

@ -67,23 +67,23 @@ func TestVoteSignable(t *testing.T) {
require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.")
}
func TestVoteSignableTestVectors(t *testing.T) {
vote := CanonicalizeVote("", &Vote{Height: 1, Round: 1})
func TestVoteSignBytesTestVectors(t *testing.T) {
tests := []struct {
canonicalVote CanonicalVote
want []byte
chainID string
vote *Vote
want []byte
}{
{
CanonicalizeVote("", &Vote{}),
0: {
"", &Vote{},
// NOTE: Height and Round are skipped here. This case needs to be considered while parsing.
// []byte{0x2a, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff},
[]byte{0x2a, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
[]byte{0xd, 0x2a, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// with proper (fixed size) height and round (PreCommit):
{
CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}),
1: {
"", &Vote{Height: 1, Round: 1, Type: PrecommitType},
[]byte{
0x21, // length
0x8, // (field_number << 3) | wire_type
0x2, // PrecommitType
0x11, // (field_number << 3) | wire_type
@ -95,9 +95,10 @@ func TestVoteSignableTestVectors(t *testing.T) {
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// with proper (fixed size) height and round (PreVote):
{
CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}),
2: {
"", &Vote{Height: 1, Round: 1, Type: PrevoteType},
[]byte{
0x21, // length
0x8, // (field_number << 3) | wire_type
0x1, // PrevoteType
0x11, // (field_number << 3) | wire_type
@ -108,9 +109,10 @@ func TestVoteSignableTestVectors(t *testing.T) {
// remaining fields (timestamp):
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
{
vote,
3: {
"", &Vote{Height: 1, Round: 1},
[]byte{
0x1f, // length
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
@ -120,9 +122,10 @@ func TestVoteSignableTestVectors(t *testing.T) {
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// containing non-empty chain_id:
{
CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}),
4: {
"test_chain_id", &Vote{Height: 1, Round: 1},
[]byte{
0x2e, // length
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
@ -135,9 +138,7 @@ func TestVoteSignableTestVectors(t *testing.T) {
},
}
for i, tc := range tests {
got, err := cdc.MarshalBinaryBare(tc.canonicalVote)
require.NoError(t, err)
got := tc.vote.SignBytes(tc.chainID)
require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i)
}
}
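
The rewritten vectors above come from vote.SignBytes(chainID) rather than a bare amino marshal of the canonical vote, so each expected byte slice now starts with a varint length prefix (0xd, 0x21, 0x1f, 0x2e) before the encoded fields. A tiny sketch of that framing (illustrative only, not amino itself):

package main

import "fmt"

func main() {
	payload := []byte{0x8, 0x2} // stand-in for the encoded vote fields
	// single-byte varint length prefix, as in the vectors above
	prefixed := append([]byte{byte(len(payload))}, payload...)
	fmt.Printf("% x\n", prefixed) // 02 08 02
}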


+ 1
- 1
version/version.go View File

@ -20,7 +20,7 @@ const (
// Must be a string because scripts like dist.sh read this file.
// XXX: Don't change the name of this variable or you will break
// automation :)
TMCoreSemVer = "0.30.0"
TMCoreSemVer = "0.30.1"
// ABCISemVer is the semantic version of the ABCI library
ABCISemVer = "0.15.0"

