
Merge pull request #1121 from tendermint/consensus-tests

Consensus tests
Ethan Buchman committed 7 years ago (via GitHub)
commit 02c1aef48b
18 changed files with 227 additions and 153 deletions
  1. Makefile (+3 -3)
  2. cmd/tendermint/commands/root.go (+5 -1)
  3. cmd/tendermint/commands/root_test.go (+124 -58)
  4. config/config.go (+21 -3)
  5. config/toml.go (+2 -0)
  6. consensus/common_test.go (+2 -2)
  7. consensus/mempool_test.go (+5 -5)
  8. consensus/reactor_test.go (+21 -35)
  9. consensus/replay_test.go (+3 -3)
  10. consensus/state_test.go (+16 -16)
  11. consensus/wal_test.go (+1 -1)
  12. glide.lock (+11 -11)
  13. glide.yaml (+1 -1)
  14. mempool/mempool.go (+5 -7)
  15. mempool/mempool_test.go (+3 -2)
  16. node/node.go (+1 -0)
  17. p2p/test_util.go (+3 -2)
  18. test/run_test.sh (+0 -3)

Makefile (+3 -3)

@ -2,14 +2,14 @@ GOTOOLS = \
github.com/mitchellh/gox \
github.com/Masterminds/glide \
github.com/tcnksm/ghr \
gopkg.in/alecthomas/gometalinter.v2
# gopkg.in/alecthomas/gometalinter.v2
GOTOOLS_CHECK = gox glide ghr gometalinter.v2
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short HEAD`"
all: check build test install metalinter
all: check build test install
check: check_tools get_vendor_deps
@ -42,7 +42,7 @@ check_tools:
get_tools:
@echo "--> Installing tools"
go get -u -v $(GOTOOLS)
@gometalinter.v2 --install
# @gometalinter.v2 --install
update_tools:
@echo "--> Updating tools"


cmd/tendermint/commands/root.go (+5 -1)

@ -18,7 +18,11 @@ var (
)
func init() {
RootCmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
registerFlagsRootCmd(RootCmd)
}
func registerFlagsRootCmd(cmd *cobra.Command) {
cmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
}
// ParseConfig retrieves the default environment configuration,


cmd/tendermint/commands/root_test.go (+124 -58)

@ -1,7 +1,10 @@
package commands
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
@ -12,6 +15,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/cli"
cmn "github.com/tendermint/tmlibs/common"
)
var (
@ -22,89 +26,151 @@ const (
rootName = "root"
)
// isolate provides a clean setup and returns a copy of RootCmd you can
// modify in the test cases.
// NOTE: it unsets all TM* env variables.
func isolate(cmds ...*cobra.Command) cli.Executable {
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {
panic(err)
}
if err := os.Unsetenv("TM_HOME"); err != nil {
panic(err)
}
if err := os.RemoveAll(defaultRoot); err != nil {
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
viper.Reset()
config = cfg.DefaultConfig()
r := &cobra.Command{
Use: rootName,
}
// prepare new rootCmd
func testRootCmd() *cobra.Command {
rootCmd := &cobra.Command{
Use: RootCmd.Use,
PersistentPreRunE: RootCmd.PersistentPreRunE,
Run: func(cmd *cobra.Command, args []string) {},
}
r.AddCommand(cmds...)
wr := cli.PrepareBaseCmd(r, "TM", defaultRoot)
return wr
registerFlagsRootCmd(rootCmd)
var l string
rootCmd.PersistentFlags().String("log", l, "Log")
return rootCmd
}
func TestRootConfig(t *testing.T) {
assert, require := assert.New(t), require.New(t)
func testSetup(rootDir string, args []string, env map[string]string) error {
clearConfig(defaultRoot)
// we pre-create a config file we can refer to in the rest of
// the test cases.
cvals := map[string]string{
"moniker": "monkey",
"fast_sync": "false",
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
// run with the args and env
args = append([]string{rootCmd.Use}, args...)
return cli.RunWithArgs(cmd, args, env)
}
func TestRootHome(t *testing.T) {
newRoot := filepath.Join(defaultRoot, "something-else")
cases := []struct {
args []string
env map[string]string
root string
}{
{nil, nil, defaultRoot},
{[]string{"--home", newRoot}, nil, newRoot},
{nil, map[string]string{"TMHOME": newRoot}, newRoot},
}
for i, tc := range cases {
idxString := strconv.Itoa(i)
err := testSetup(defaultRoot, tc.args, tc.env)
require.Nil(t, err, idxString)
assert.Equal(t, tc.root, config.RootDir, idxString)
assert.Equal(t, tc.root, config.P2P.RootDir, idxString)
assert.Equal(t, tc.root, config.Consensus.RootDir, idxString)
assert.Equal(t, tc.root, config.Mempool.RootDir, idxString)
}
// proper types of the above settings
cfast := false
conf, err := cli.WriteDemoConfig(cvals)
require.Nil(err)
}
func TestRootFlagsEnv(t *testing.T) {
// defaults
defaults := cfg.DefaultConfig()
dmax := defaults.P2P.MaxNumPeers
defaultLogLvl := defaults.LogLevel
cases := []struct {
args []string
env map[string]string
root string
moniker string
fastSync bool
maxPeer int
logLevel string
}{
{nil, nil, defaultRoot, defaults.Moniker, defaults.FastSync, dmax},
// try multiple ways of setting root (two flags, cli vs. env)
{[]string{"--home", conf}, nil, conf, cvals["moniker"], cfast, dmax},
{nil, map[string]string{"TMHOME": conf}, conf, cvals["moniker"], cfast, dmax},
// check setting p2p subflags two different ways
{[]string{"--p2p.max_num_peers", "420"}, nil, defaultRoot, defaults.Moniker, defaults.FastSync, 420},
{nil, map[string]string{"TM_P2P_MAX_NUM_PEERS": "17"}, defaultRoot, defaults.Moniker, defaults.FastSync, 17},
// try to set env that have no flags attached...
{[]string{"--home", conf}, map[string]string{"TM_MONIKER": "funny"}, conf, "funny", cfast, dmax},
{[]string{"--log", "debug"}, nil, defaultLogLvl}, // wrong flag
{[]string{"--log_level", "debug"}, nil, "debug"}, // right flag
{nil, map[string]string{"TM_LOW": "debug"}, defaultLogLvl}, // wrong env flag
{nil, map[string]string{"MT_LOG_LEVEL": "debug"}, defaultLogLvl}, // wrong env prefix
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
}
for idx, tc := range cases {
i := strconv.Itoa(idx)
// test command that does nothing, except trigger unmarshalling in root
noop := &cobra.Command{
Use: "noop",
RunE: func(cmd *cobra.Command, args []string) error {
return nil
},
}
noop.Flags().Int("p2p.max_num_peers", defaults.P2P.MaxNumPeers, "")
cmd := isolate(noop)
args := append([]string{rootName, noop.Use}, tc.args...)
err := cli.RunWithArgs(cmd, args, tc.env)
require.Nil(err, i)
assert.Equal(tc.root, config.RootDir, i)
assert.Equal(tc.root, config.P2P.RootDir, i)
assert.Equal(tc.root, config.Consensus.RootDir, i)
assert.Equal(tc.root, config.Mempool.RootDir, i)
assert.Equal(tc.moniker, config.Moniker, i)
assert.Equal(tc.fastSync, config.FastSync, i)
assert.Equal(tc.maxPeer, config.P2P.MaxNumPeers, i)
for i, tc := range cases {
idxString := strconv.Itoa(i)
err := testSetup(defaultRoot, tc.args, tc.env)
require.Nil(t, err, idxString)
assert.Equal(t, tc.logLevel, config.LogLevel, idxString)
}
}
func TestRootConfig(t *testing.T) {
// write non-default config
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
"log_level": nonDefaultLogLvl,
}
cases := []struct {
args []string
env map[string]string
logLvl string
}{
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log_level=abc:info"}, nil, "abc:info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
}
for i, tc := range cases {
idxString := strconv.Itoa(i)
clearConfig(defaultRoot)
// XXX: path must match cfg.defaultConfigPath
configFilePath := filepath.Join(defaultRoot, "config")
err := cmn.EnsureDir(configFilePath, 0700)
require.Nil(t, err)
// write the non-defaults to a different path
// TODO: support writing sub configs so we can test that too
err = WriteConfigVals(configFilePath, cvals)
require.Nil(t, err)
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
// run with the args and env
tc.args = append([]string{rootCmd.Use}, tc.args...)
err = cli.RunWithArgs(cmd, tc.args, tc.env)
require.Nil(t, err, idxString)
assert.Equal(t, tc.logLvl, config.LogLevel, idxString)
}
}
// WriteConfigVals writes a toml file with the given values.
// It returns an error if writing was impossible.
func WriteConfigVals(dir string, vals map[string]string) error {
data := ""
for k, v := range vals {
data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return ioutil.WriteFile(cfile, []byte(data), 0666)
}
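The WriteConfigVals helper above simply concatenates key = "value" lines into <dir>/config.toml. Below is a minimal, editor-added sketch of how a test could exercise it, assuming it lives in a _test.go file of the same commands package; the test name and temp-dir usage are hypothetical, not part of the commit.

package commands

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

func TestWriteConfigValsSketch(t *testing.T) {
	dir, err := ioutil.TempDir("", "tm-config")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Write a single override; the helper emits: log_level = "abc:debug"
	if err := WriteConfigVals(dir, map[string]string{"log_level": "abc:debug"}); err != nil {
		t.Fatal(err)
	}

	data, err := ioutil.ReadFile(filepath.Join(dir, "config.toml"))
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("config.toml:\n%s", data)
}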

config/config.go (+21 -3)

@ -7,11 +7,12 @@ import (
"time"
)
// Note: Most of the structs & relevant comments + the
// NOTE: Most of the structs & relevant comments + the
// default configuration options were used to manually
// generate the config.toml. Please reflect any changes
// made here in the defaultConfigTemplate constant in
// config/toml.go
// NOTE: tmlibs/cli must know to look in the config dir!
var (
DefaultTendermintDir = ".tendermint"
defaultConfigDir = "config"
@ -59,9 +60,9 @@ func TestConfig() *Config {
BaseConfig: TestBaseConfig(),
RPC: TestRPCConfig(),
P2P: TestP2PConfig(),
Mempool: DefaultMempoolConfig(),
Mempool: TestMempoolConfig(),
Consensus: TestConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
TxIndex: TestTxIndexConfig(),
}
}
@ -293,6 +294,7 @@ func TestP2PConfig() *P2PConfig {
conf := DefaultP2PConfig()
conf.ListenAddress = "tcp://0.0.0.0:36656"
conf.SkipUPNP = true
conf.FlushThrottleTimeout = 10
return conf
}
@ -311,6 +313,7 @@ type MempoolConfig struct {
RecheckEmpty bool `mapstructure:"recheck_empty"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
CacheSize int `mapstructure:"cache_size"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@ -320,9 +323,17 @@ func DefaultMempoolConfig() *MempoolConfig {
RecheckEmpty: true,
Broadcast: true,
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
CacheSize: 100000,
}
}
// TestMempoolConfig returns a configuration for testing the Tendermint mempool
func TestMempoolConfig() *MempoolConfig {
config := DefaultMempoolConfig()
config.CacheSize = 1000
return config
}
// WalDir returns the full path to the mempool's write-ahead log
func (m *MempoolConfig) WalDir() string {
return rootify(m.WalPath, m.RootDir)
@ -437,6 +448,8 @@ func TestConsensusConfig() *ConsensusConfig {
config.TimeoutPrecommitDelta = 1
config.TimeoutCommit = 10
config.SkipTimeoutCommit = true
config.PeerGossipSleepDuration = 5
config.PeerQueryMaj23SleepDuration = 50
return config
}
@ -488,6 +501,11 @@ func DefaultTxIndexConfig() *TxIndexConfig {
}
}
// TestTxIndexConfig returns a default configuration for the transaction indexer.
func TestTxIndexConfig() *TxIndexConfig {
return DefaultTxIndexConfig()
}
//-----------------------------------------------------------------------------
// Utils
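A small sketch (added by the editor, not part of the commit) of how the new test/default config split plays out, assuming the usual import path; the cache sizes, 1000 for tests and 100000 by default, come straight from the hunk above.

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// TestConfig now composes TestMempoolConfig and TestTxIndexConfig
	// instead of reusing the production defaults.
	test := cfg.TestConfig()
	def := cfg.DefaultConfig()
	fmt.Println(test.Mempool.CacheSize) // 1000 - small cache for tests
	fmt.Println(def.Mempool.CacheSize)  // 100000 - production default
}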


config/toml.go (+2 -0)

@ -20,6 +20,8 @@ func init() {
/****** these are for production settings ***********/
// EnsureRoot creates the root, config, and data directories if they don't exist,
// and panics if it fails.
func EnsureRoot(rootDir string) {
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
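For context, a hedged usage sketch of the function documented above: EnsureRoot is what callers rely on to make sure the root, config/ and data/ directories exist before a config.toml is written. The path below is hypothetical.

package main

import cfg "github.com/tendermint/tendermint/config"

func main() {
	root := "/tmp/tm-example" // hypothetical root directory
	cfg.EnsureRoot(root)      // creates root, root/config and root/data, panicking on failure
}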


consensus/common_test.go (+2 -2)

@ -36,8 +36,8 @@ const (
)
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Second * 2
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Second * 1 // must be a whole number of seconds because CreateEmptyBlocksInterval is an int of seconds
func ensureDir(dir string, mode os.FileMode) {
if err := cmn.EnsureDir(dir, mode); err != nil {
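Why ensureTimeout drops to a whole second: CreateEmptyBlocksInterval is set from int(ensureTimeout.Seconds()) in the mempool tests below, and an int cast of a sub-second duration truncates to zero. A tiny editor-added illustration, not part of the commit:

package main

import (
	"fmt"
	"time"
)

func main() {
	// A fractional timeout would silently disable the empty-blocks interval.
	fmt.Println(int((500 * time.Millisecond).Seconds())) // 0
	fmt.Println(int((1 * time.Second).Seconds()))        // 1
}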


consensus/mempool_test.go (+5 -5)

@ -19,7 +19,7 @@ func init() {
config = ResetConfig("consensus_mempool_test")
}
func TestNoProgressUntilTxsAvailable(t *testing.T) {
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
@ -37,7 +37,7 @@ func TestNoProgressUntilTxsAvailable(t *testing.T) {
ensureNoNewStep(newBlockCh)
}
func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds())
state, privVals := randGenesisState(1, false, 10)
@ -52,7 +52,7 @@ func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed
}
func TestProgressInHigherRound(t *testing.T) {
func TestMempoolProgressInHigherRound(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
@ -94,7 +94,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
}
}
func TestTxConcurrentWithCommit(t *testing.T) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusState(state, privVals[0], NewCounterApplication())
height, round := cs.Height, cs.Round
@ -116,7 +116,7 @@ func TestTxConcurrentWithCommit(t *testing.T) {
}
}
func TestRmBadTx(t *testing.T) {
func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
cs := newConsensusState(state, privVals[0], app)


consensus/reactor_test.go (+21 -35)

@ -31,31 +31,24 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*Consensus
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
eventBuses := make([]*types.EventBus, N)
logger := consensusLogger()
for i := 0; i < N; i++ {
/*thisLogger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
thisLogger := logger
reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
reactors[i].conS.SetLogger(thisLogger.With("validator", i))
reactors[i].SetLogger(thisLogger.With("validator", i))
eventBuses[i] = types.NewEventBus()
eventBuses[i].SetLogger(thisLogger.With("module", "events", "validator", i))
err := eventBuses[i].Start()
require.NoError(t, err)
reactors[i].SetLogger(css[i].Logger.With("validator", "i", "module", "consensus"))
// eventBus is already started with the cs
eventBuses[i] = css[i].eventBus
reactors[i].SetEventBus(eventBuses[i])
eventChans[i] = make(chan interface{}, 1)
err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
require.NoError(t, err)
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("CONSENSUS", reactors[i])
s.SetLogger(reactors[i].Logger.With("module", "p2p", "validator", i))
s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
return s
}, p2p.Connect2Switches)
@ -84,15 +77,14 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
}
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
func TestReactorBasic(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
wg.Done()
}, css)
}
@ -113,9 +105,8 @@ func TestReactorProposalHeartbeats(t *testing.T) {
require.NoError(t, err)
}
// wait till everyone sends a proposal heartbeat
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, N, func(j int) {
<-heartbeatChans[j]
wg.Done()
}, css)
// send a tx
@ -124,9 +115,8 @@ func TestReactorProposalHeartbeats(t *testing.T) {
}
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
wg.Done()
}, css)
}
@ -147,9 +137,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, nVals, func(j int) {
<-eventChans[j]
wg.Done()
}, css)
//---------------------------------------------------------------------------
@ -210,9 +199,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, nPeers, func(j int) {
<-eventChans[j]
wg.Done()
}, css)
//---------------------------------------------------------------------------
@ -300,16 +288,13 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) {
timeoutWaitGroup(t, N-1, func(j int) {
<-eventChans[j]
wg.Done()
}, css)
}
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
defer wg.Done()
timeoutWaitGroup(t, n, func(j int) {
css[j].Logger.Debug("waitForAndValidateBlock")
newBlockI, ok := <-eventChans[j]
if !ok {
@ -327,8 +312,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
}
func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
defer wg.Done()
timeoutWaitGroup(t, n, func(j int) {
ntxs := 0
BLOCK_TX_LOOP:
for {
@ -359,8 +343,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
}
func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
defer wg.Done()
timeoutWaitGroup(t, n, func(j int) {
var newBlock *types.Block
LOOP:
@ -398,11 +381,14 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
return nil
}
func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*ConsensusState) {
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) {
wg := new(sync.WaitGroup)
wg.Add(n)
for i := 0; i < n; i++ {
go f(wg, i)
go func(j int) {
f(j)
wg.Done()
}(i)
}
done := make(chan struct{})
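The timeoutWaitGroup refactor above moves the sync.WaitGroup bookkeeping out of every callback: callers now pass a plain func(int) and the wrapper calls wg.Done itself. A self-contained sketch of the pattern follows; the names are illustrative, not from the commit.

package main

import (
	"fmt"
	"sync"
)

// runN mirrors the new timeoutWaitGroup shape: it owns the WaitGroup, so
// callbacks only receive the index and never touch wg themselves.
func runN(n int, f func(int)) {
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			defer wg.Done()
			f(j)
		}(i)
	}
	wg.Wait()
}

func main() {
	runN(4, func(j int) { fmt.Println("validator", j, "saw a block") })
}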


consensus/replay_test.go (+3 -3)

@ -81,13 +81,13 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
}
func sendTxs(cs *ConsensusState, ctx context.Context) {
i := 0
for {
for i := 0; i < 256; i++ {
select {
case <-ctx.Done():
return
default:
cs.mempool.CheckTx([]byte{byte(i)}, nil)
tx := []byte{byte(i)}
cs.mempool.CheckTx(tx, nil)
i++
}
}
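The sendTxs loop is now bounded at 256, presumably because each tx is the single byte byte(i) and values repeat modulo 256, so later iterations would only produce duplicates. A quick editor-added illustration of the wraparound, not part of the commit:

package main

import "fmt"

func main() {
	i := 256
	fmt.Println(byte(255), byte(i)) // 255 0 - single-byte tx payloads repeat after 255
}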


consensus/state_test.go (+16 -16)

@ -55,7 +55,7 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh
//----------------------------------------------------------------------------------------------------
// ProposeSuite
func TestProposerSelection0(t *testing.T) {
func TestStateProposerSelection0(t *testing.T) {
cs1, vss := randConsensusState(4)
height, round := cs1.Height, cs1.Round
@ -89,7 +89,7 @@ func TestProposerSelection0(t *testing.T) {
}
// Now let's do it all again, but starting from round 2 instead of 0
func TestProposerSelection2(t *testing.T) {
func TestStateProposerSelection2(t *testing.T) {
cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@ -118,7 +118,7 @@ func TestProposerSelection2(t *testing.T) {
}
// a non-validator should timeout into the prevote round
func TestEnterProposeNoPrivValidator(t *testing.T) {
func TestStateEnterProposeNoPrivValidator(t *testing.T) {
cs, _ := randConsensusState(1)
cs.SetPrivValidator(nil)
height, round := cs.Height, cs.Round
@ -143,7 +143,7 @@ func TestEnterProposeNoPrivValidator(t *testing.T) {
}
// a validator should not timeout of the prevote round (TODO: unless the block is really big!)
func TestEnterProposeYesPrivValidator(t *testing.T) {
func TestStateEnterProposeYesPrivValidator(t *testing.T) {
cs, _ := randConsensusState(1)
height, round := cs.Height, cs.Round
@ -179,7 +179,7 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
}
}
func TestBadProposal(t *testing.T) {
func TestStateBadProposal(t *testing.T) {
cs1, vss := randConsensusState(2)
height, round := cs1.Height, cs1.Round
vs2 := vss[1]
@ -239,7 +239,7 @@ func TestBadProposal(t *testing.T) {
// FullRoundSuite
// propose, prevote, and precommit a block
func TestFullRound1(t *testing.T) {
func TestStateFullRound1(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
@ -275,7 +275,7 @@ func TestFullRound1(t *testing.T) {
}
// nil is proposed, so prevote and precommit nil
func TestFullRoundNil(t *testing.T) {
func TestStateFullRoundNil(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
@ -293,7 +293,7 @@ func TestFullRoundNil(t *testing.T) {
// run through propose, prevote, precommit commit with two validators
// where the first validator has to wait for votes from the second
func TestFullRound2(t *testing.T) {
func TestStateFullRound2(t *testing.T) {
cs1, vss := randConsensusState(2)
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
@ -334,7 +334,7 @@ func TestFullRound2(t *testing.T) {
// two validators, 4 rounds.
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestLockNoPOL(t *testing.T) {
func TestStateLockNoPOL(t *testing.T) {
cs1, vss := randConsensusState(2)
vs2 := vss[1]
height := cs1.Height
@ -503,7 +503,7 @@ func TestLockNoPOL(t *testing.T) {
}
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka
func TestLockPOLRelock(t *testing.T) {
func TestStateLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -618,7 +618,7 @@ func TestLockPOLRelock(t *testing.T) {
}
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka
func TestLockPOLUnlock(t *testing.T) {
func TestStateLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -715,7 +715,7 @@ func TestLockPOLUnlock(t *testing.T) {
// a polka at round 1 but we miss it
// then a polka at round 2 that we lock on
// then we see the polka from round 1 but shouldn't unlock
func TestLockPOLSafety1(t *testing.T) {
func TestStateLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -838,7 +838,7 @@ func TestLockPOLSafety1(t *testing.T) {
// What we want:
// dont see P0, lock on P1 at R1, dont unlock using P0 at R2
func TestLockPOLSafety2(t *testing.T) {
func TestStateLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@ -937,7 +937,7 @@ func TestLockPOLSafety2(t *testing.T) {
// TODO: Slashing
/*
func TestSlashingPrevotes(t *testing.T) {
func TestStateSlashingPrevotes(t *testing.T) {
cs1, vss := randConsensusState(2)
vs2 := vss[1]
@ -972,7 +972,7 @@ func TestSlashingPrevotes(t *testing.T) {
// XXX: Check for existence of Dupeout info
}
func TestSlashingPrecommits(t *testing.T) {
func TestStateSlashingPrecommits(t *testing.T) {
cs1, vss := randConsensusState(2)
vs2 := vss[1]
@ -1017,7 +1017,7 @@ func TestSlashingPrecommits(t *testing.T) {
// 4 vals.
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestHalt1(t *testing.T) {
func TestStateHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]


consensus/wal_test.go (+1 -1)

@ -41,7 +41,7 @@ func TestWALEncoderDecoder(t *testing.T) {
}
}
func TestSearchForEndHeight(t *testing.T) {
func TestWALSearchForEndHeight(t *testing.T) {
walBody, err := WALWithNBlocks(6)
if err != nil {
t.Fatal(err)


glide.lock (+11 -11)

@ -1,5 +1,5 @@
hash: 072c8e685dd519c1f509da67379b70451a681bf3ef6cbd82900a1f68c55bbe16
updated: 2017-12-29T11:08:17.355999228-05:00
hash: 9399a10e80d255104f8ec07b5d495c41d8a3f7a421f9da97ebd78c65189f381d
updated: 2018-01-18T23:11:10.703734578-05:00
imports:
- name: github.com/btcsuite/btcd
version: 2e60448ffcc6bf78332d1fe590260095f554dd78
@ -10,7 +10,7 @@ imports:
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/go-kit/kit
version: 953e747656a7bbb5e1f998608b460458958b70cc
version: 53f10af5d5c7375d4655a3d6852457ed17ab5cc7
subpackages:
- log
- log/level
@ -68,13 +68,13 @@ imports:
- name: github.com/mitchellh/mapstructure
version: 06020f85339e21b2478f756a78e295255ffa4d6a
- name: github.com/pelletier/go-toml
version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224
version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/rcrowley/go-metrics
version: e181e095bae94582363434144c61a9653aff6e50
- name: github.com/spf13/afero
version: 57afd63c68602b63ed976de00dd066ccb3c319db
version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536
subpackages:
- mem
- name: github.com/spf13/cast
@ -88,7 +88,7 @@ imports:
- name: github.com/spf13/viper
version: 25b30aa063fc18e48662b86996252eabdcf2f0c7
- name: github.com/syndtr/goleveldb
version: 34011bf325bce385408353a30b101fe5e923eb6e
version: adf24ef3f94bd13ec4163060b21a5678f22b429b
subpackages:
- leveldb
- leveldb/cache
@ -129,7 +129,7 @@ imports:
subpackages:
- iavl
- name: github.com/tendermint/tmlibs
version: 91b4b534ad78e442192c8175db92a06a51064064
version: 15e51fa76086a3c505f227679c2478043ae7261b
subpackages:
- autofile
- cli
@ -144,7 +144,7 @@ imports:
- pubsub/query
- test
- name: golang.org/x/crypto
version: 95a4943f35d008beabde8c11e5075a1b714e6419
version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122
subpackages:
- curve25519
- nacl/box
@ -165,11 +165,11 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: 83801418e1b59fb1880e363299581ee543af32ca
version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840
subpackages:
- unix
- name: golang.org/x/text
version: e19ae1496984b1c655b8044a65c0300a3c878dd3
version: 57961680700a5336d15015c8c50686ca5ba362a4
subpackages:
- secure/bidirule
- transform
@ -199,7 +199,7 @@ imports:
- tap
- transport
- name: gopkg.in/go-playground/validator.v9
version: b1f51f36f1c98cc97f777d6fc9d4b05eaa0cabb5
version: 61caf9d3038e1af346dbf5c2e16f6678e1548364
- name: gopkg.in/yaml.v2
version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5
testImports:


glide.yaml (+1 -1)

@ -34,7 +34,7 @@ import:
subpackages:
- iavl
- package: github.com/tendermint/tmlibs
version: v0.6.0
version: v0.6.1
subpackages:
- autofile
- cli


mempool/mempool.go (+5 -7)

@ -3,7 +3,6 @@ package mempool
import (
"bytes"
"container/list"
"fmt"
"sync"
"sync/atomic"
"time"
@ -49,7 +48,7 @@ TODO: Better handle abci client errors. (make it automatically handle connection
*/
const cacheSize = 100000
var ErrTxInCache = errors.New("Tx already exists in cache")
// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
// round. Transaction validity is checked using the CheckTx abci message before the transaction is
@ -92,9 +91,8 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
recheckCursor: nil,
recheckEnd: nil,
logger: log.NewNopLogger(),
cache: newTxCache(cacheSize),
cache: newTxCache(config.CacheSize),
}
mempool.initWAL()
proxyAppConn.SetResponseCallback(mempool.resCb)
return mempool
}
@ -131,7 +129,7 @@ func (mem *Mempool) CloseWAL() bool {
return true
}
func (mem *Mempool) initWAL() {
func (mem *Mempool) InitWAL() {
walDir := mem.config.WalDir()
if walDir != "" {
err := cmn.EnsureDir(walDir, 0700)
@ -192,7 +190,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
// CACHE
if mem.cache.Exists(tx) {
return fmt.Errorf("Tx already exists in cache")
return ErrTxInCache
}
mem.cache.Push(tx)
// END CACHE
@ -449,7 +447,7 @@ func newTxCache(cacheSize int) *txCache {
// Reset resets the txCache to empty.
func (cache *txCache) Reset() {
cache.mtx.Lock()
cache.map_ = make(map[string]struct{}, cacheSize)
cache.map_ = make(map[string]struct{}, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
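With the exported sentinel, callers can distinguish "already in cache" from real CheckTx failures by identity rather than by parsing the old fmt.Errorf message. A compile-only sketch of the intended use, assuming the standard import paths; the wrapper function is hypothetical.

package main

import (
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// checkTxIgnoringDuplicates treats a cache hit as a no-op instead of an error.
func checkTxIgnoringDuplicates(mem *mempool.Mempool, tx types.Tx) error {
	if err := mem.CheckTx(tx, nil); err != nil && err != mempool.ErrTxInCache {
		return err
	}
	return nil
}

func main() {}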


mempool/mempool_test.go (+3 -2)

@ -236,12 +236,13 @@ func TestMempoolCloseWAL(t *testing.T) {
require.Equal(t, 0, len(m1), "no matches yet")
// 3. Create the mempool
wcfg := *(cfg.DefaultMempoolConfig())
wcfg := cfg.DefaultMempoolConfig()
wcfg.RootDir = rootDir
app := dummy.NewDummyApplication()
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
mempool := NewMempool(&wcfg, appConnMem, 10)
mempool := NewMempool(wcfg, appConnMem, 10)
mempool.InitWAL()
// 4. Ensure that the directory contains the WAL file
m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
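The test no longer copies the config by value; it keeps the *MempoolConfig pointer returned by DefaultMempoolConfig, which is what NewMempool now takes, together with an explicit InitWAL call. A minimal editor-added sketch of the new setup shape; the root path is hypothetical.

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	wcfg := cfg.DefaultMempoolConfig() // *MempoolConfig - no value copy needed
	wcfg.RootDir = "/tmp/mempool-wal-example"
	fmt.Println(wcfg.WalDir()) // e.g. /tmp/mempool-wal-example/data/mempool.wal
}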


node/node.go (+1 -0)

@ -189,6 +189,7 @@ func NewNode(config *cfg.Config,
// Make MempoolReactor
mempoolLogger := logger.With("module", "mempool")
mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight)
mempool.InitWAL() // no need to have the mempool wal during tests
mempool.SetLogger(mempoolLogger)
mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
mempoolReactor.SetLogger(mempoolLogger)


p2p/test_util.go (+3 -2)

@ -97,7 +97,9 @@ func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
nodeKey := &NodeKey{
PrivKey: crypto.GenPrivKeyEd25519().Wrap(),
}
s := initSwitch(i, NewSwitch(cfg))
s := NewSwitch(cfg)
s.SetLogger(log.TestingLogger())
s = initSwitch(i, s)
s.SetNodeInfo(NodeInfo{
PubKey: nodeKey.PubKey(),
Moniker: cmn.Fmt("switch%d", i),
@ -106,6 +108,5 @@ func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
})
s.SetNodeKey(nodeKey)
s.SetLogger(log.TestingLogger())
return s
}

test/run_test.sh (+0 -3)

@ -6,9 +6,6 @@ pwd
BRANCH=$(git rev-parse --abbrev-ref HEAD)
echo "Current branch: $BRANCH"
# run the linter
make metalinter
# run the go unit tests with coverage
bash test/test_cover.sh

