improve ResetTestRootWithChainID() concurrency safety (#3291)

* improve ResetTestRootWithChainID() concurrency safety

Rely on ioutil.TempDir() to create test root directories and ensure that
multiple test cases with the same chain ID can run in parallel (see the
sketch after this message).

* Update config/toml.go

Co-Authored-By: alessio <quadrispro@ubuntu.com>

* clean up test directories after completion

Closes: #1034

* Remove redundant EnsureDir call

* s/PanicSanity()/panic()/

* Put create dir functionality back in ResetTestRootWithChainID

* Place test directories in OS's tempdir

On modern UNIX and UNIX-like systems, /tmp is very often mounted as tmpfs,
which might speed up test execution a bit.

* Set 0700 to a const

* rootsDirs -> configRootDirs

* Don't double remove directories

* Avoid global variables

* Fix consensus tests

* Reduce defer stack

* Address review comments

* Try to fix tests

* Update CHANGELOG_PENDING.md

Co-Authored-By: alessio <quadrispro@ubuntu.com>

* Update consensus/common_test.go

Co-Authored-By: alessio <quadrispro@ubuntu.com>

* Update consensus/common_test.go

Co-Authored-By: alessio <quadrispro@ubuntu.com>
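
The core idea, distilled from the config/toml.go hunk further down, is to derive each test root from ioutil.TempDir() so every call gets a uniquely named directory under the OS temp dir. A minimal, self-contained sketch of that pattern follows; makeTestRoot, the literal "config"/"data" subdirectory names, and the use of os.MkdirAll are illustrative stand-ins for the repository's own helpers, not its actual API:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// makeTestRoot creates a unique test root under os.TempDir() and returns it
// together with a cleanup function, mirroring the pattern this commit adopts.
// chainID and testName only make the directory name readable; uniqueness
// comes from ioutil.TempDir itself, so same-chain-ID tests can run in parallel.
func makeTestRoot(chainID, testName string) (string, func()) {
	rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
	if err != nil {
		panic(err)
	}
	// create the config/ and data/ subdirectories the tests expect
	for _, sub := range []string{"config", "data"} {
		if err := os.MkdirAll(filepath.Join(rootDir, sub), 0700); err != nil {
			panic(err)
		}
	}
	return rootDir, func() { os.RemoveAll(rootDir) }
}

func main() {
	root, cleanup := makeTestRoot("test-chain", "example_test")
	defer cleanup()
	fmt.Println(root) // e.g. /tmp/test-chain-example_test_123456789
}
```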
Branch: pull/3323/head
Author: Alessio Treglia
Committed by: Anton Kaliaev
Commit: 59cc6d36c9
24 changed files with 201 additions and 132 deletions
  1. CHANGELOG_PENDING.md (+2, -0)
  2. blockchain/reactor_test.go (+3, -0)
  3. blockchain/store_test.go (+28, -10)
  4. config/toml.go (+16, -25)
  5. config/toml_test.go (+1, -0)
  6. consensus/byzantine_test.go (+2, -5)
  7. consensus/common_test.go (+29, -6)
  8. consensus/mempool_test.go (+4, -4)
  9. consensus/reactor_test.go (+13, -11)
  10. consensus/replay_test.go (+41, -22)
  11. consensus/state_test.go (+0, -4)
  12. consensus/types/height_vote_set_test.go (+5, -1)
  13. consensus/wal_generator.go (+8, -20)
  14. consensus/wal_test.go (+2, -2)
  15. lite/client/provider_test.go (+3, -1)
  16. lite/proxy/query_test.go (+3, -1)
  17. mempool/bench_test.go (+2, -1)
  18. mempool/mempool_test.go (+16, -8)
  19. mempool/reactor_test.go (+2, -1)
  20. node/node_test.go (+7, -0)
  21. rpc/client/main_test.go (+3, -1)
  22. rpc/grpc/grpc_test.go (+2, -1)
  23. rpc/test/helpers.go (+5, -5)
  24. state/state_test.go (+4, -3)

CHANGELOG_PENDING.md (+2, -0)

@ -20,6 +20,8 @@ Special thanks to external contributors on this release:
### IMPROVEMENTS:
- [config] \#3291 Make config.ResetTestRootWithChainID() create concurrency-safe test directories.
### BUG FIXES:
* [consensus] \#3297 Flush WAL on stop to prevent data corruption during


blockchain/reactor_test.go (+3, -0)

@ -1,6 +1,7 @@
package blockchain
import (
"os"
"sort"
"testing"
"time"
@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
func TestNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(65)
@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(148)


blockchain/store_test.go (+28, -10)

@ -3,6 +3,7 @@ package blockchain
import (
"bytes"
"fmt"
"os"
"runtime/debug"
"strings"
"testing"
@ -21,13 +22,16 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
// A cleanupFunc cleans up any config / test files created for a particular test.
type cleanupFunc func()
// make a Commit with a single vote containing just the height and a timestamp
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
return types.NewCommit(types.BlockID{}, commitSigs)
}
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
@ -37,7 +41,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, NewBlockStore(blockDB)
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
}
func TestLoadBlockStoreStateJSON(t *testing.T) {
@ -87,19 +91,32 @@ func freshBlockStore() (*BlockStore, db.DB) {
}
var (
state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state sm.State
block *types.Block
partSet *types.PartSet
part1 *types.Part
part2 *types.Part
seenCommit1 *types.Commit
)
block = makeBlock(1, state, new(types.Commit))
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
func TestMain(m *testing.M) {
var cleanup cleanupFunc
state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
block = makeBlock(1, state, new(types.Commit))
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = makeTestCommit(10, tmtime.Now())
)
code := m.Run()
cleanup()
os.Exit(code)
}
// TODO: This test should be simplified ...
func TestBlockStoreSaveLoadBlock(t *testing.T) {
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
// check there are no blocks at various heights
@ -350,7 +367,8 @@ func TestLoadBlockMeta(t *testing.T) {
}
func TestBlockFetchAtHeight(t *testing.T) {
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
block := makeBlock(bs.Height()+1, state, new(types.Commit))


config/toml.go (+16, -25)

@ -3,13 +3,16 @@ package config
import (
"bytes"
"fmt"
"os"
"io/ioutil"
"path/filepath"
"text/template"
cmn "github.com/tendermint/tendermint/libs/common"
)
// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0700
var configTemplate *template.Template
func init() {
@ -24,13 +27,13 @@ func init() {
// EnsureRoot creates the root, config, and data directories if they don't exist,
// and panics if it fails.
func EnsureRoot(rootDir string) {
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
}
@ -322,29 +325,17 @@ func ResetTestRoot(testName string) *Config {
}
func ResetTestRootWithChainID(testName string, chainID string) *Config {
rootDir := os.ExpandEnv("$HOME/.tendermint_test")
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
if cmn.FileExists(rootDir + "_bak") {
if err := os.RemoveAll(rootDir + "_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if cmn.FileExists(rootDir) {
if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Create new dir
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
// ensure config and data subdirs are created
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
panic(err)
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
panic(err)
}
baseConfig := DefaultBaseConfig()
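
Call sites throughout the rest of this diff follow one pattern: reset a fresh test root, then remove it when the test ends. A short usage sketch, assuming the usual cfg import alias for the config package; the package name, test name, and test body here are hypothetical:

```go
package mytest

import (
	"os"
	"testing"

	cfg "github.com/tendermint/tendermint/config"
)

// TestSomething shows the per-test cleanup pattern this commit rolls out:
// every test gets its own root directory and removes it when it finishes.
func TestSomething(t *testing.T) {
	config := cfg.ResetTestRoot("my_test") // unique directory under os.TempDir()
	defer os.RemoveAll(config.RootDir)     // clean up after the test completes
	// ... exercise the code under test using config ...
}
```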


config/toml_test.go (+1, -0)

@ -48,6 +48,7 @@ func TestEnsureTestRoot(t *testing.T) {
// create root dir
cfg := ResetTestRoot(testName)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir
// make sure config is set properly


consensus/byzantine_test.go (+2, -5)

@ -13,10 +13,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_byzantine_test")
}
//----------------------------------------------
// byzantine failures
@ -29,7 +25,8 @@ func init() {
func TestByzantine(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
defer cleanup()
// give the byzantine validator a normal ticker
ticker := NewTimeoutTicker()


consensus/common_test.go (+29, -6)

@ -37,8 +37,12 @@ const (
testSubscriber = "test-client"
)
// A cleanupFunc cleans up any config / test files created for a particular test.
type cleanupFunc func()
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var consensusReplayConfig *cfg.Config
var ensureTimeout = time.Millisecond * 100
func ensureDir(dir string, mode os.FileMode) {
@ -248,6 +252,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
// consensus states
func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
config := cfg.ResetTestRoot("consensus_state_test")
return newConsensusStateWithConfig(config, state, pv, app)
}
@ -406,7 +411,7 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
}
func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}
@ -560,14 +565,17 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {
opt(thisConfig)
}
@ -580,18 +588,26 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css
return css, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
}
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal types.PrivValidator
if i < nValidators {
@ -617,7 +633,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css
return css, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
}
}
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
@ -713,6 +733,9 @@ func newCounter() abci.Application {
}
func newPersistentKVStore() abci.Application {
dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
dir, err := ioutil.TempDir("", "persistent-kvstore")
if err != nil {
panic(err)
}
return kvstore.NewPersistentKVStoreApplication(dir)
}

consensus/mempool_test.go (+4, -4)

@ -3,6 +3,7 @@ package consensus
import (
"encoding/binary"
"fmt"
"os"
"testing"
"time"
@ -14,10 +15,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_mempool_test")
}
// for testing
func assertMempool(txn txNotifier) sm.Mempool {
return txn.(sm.Mempool)
@ -25,6 +22,7 @@ func assertMempool(txn txNotifier) sm.Mempool {
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@ -43,6 +41,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@ -58,6 +57,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
func TestMempoolProgressInHigherRound(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())


consensus/reactor_test.go (+13, -11)

@ -27,10 +27,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_reactor_test")
}
//----------------------------------------------
// in-process testnets
@ -86,7 +82,8 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
@ -116,6 +113,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
@ -218,10 +216,11 @@ func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@ -239,7 +238,8 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@ -263,7 +263,8 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
defer stopConsensusNet(logger, reactors, eventBuses)
@ -324,8 +325,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
func TestReactorValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
logger := log.TestingLogger()
reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
@ -422,7 +423,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].config.SkipTimeoutCommit = false


consensus/replay_test.go (+41, -22)

@ -8,6 +8,7 @@ import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@ -30,10 +31,19 @@ import (
"github.com/tendermint/tendermint/types"
)
var consensusReplayConfig *cfg.Config
func init() {
func TestMain(m *testing.M) {
config = ResetConfig("consensus_reactor_test")
consensusReplayConfig = ResetConfig("consensus_replay_test")
configStateTest := ResetConfig("consensus_state_test")
configMempoolTest := ResetConfig("consensus_mempool_test")
configByzantineTest := ResetConfig("consensus_byzantine_test")
code := m.Run()
os.RemoveAll(config.RootDir)
os.RemoveAll(consensusReplayConfig.RootDir)
os.RemoveAll(configStateTest.RootDir)
os.RemoveAll(configMempoolTest.RootDir)
os.RemoveAll(configByzantineTest.RootDir)
os.Exit(code)
}
// These tests ensure we can always recover from failure at any part of the consensus process.
@ -51,7 +61,8 @@ func init() {
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
logger := log.TestingLogger()
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
@ -59,7 +70,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start()
@ -110,21 +120,22 @@ func TestWALCrash(t *testing.T) {
3},
}
for _, tc := range testCases {
for i, tc := range testCases {
consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
t.Run(tc.name, func(t *testing.T) {
crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
})
}
}
func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
walPaniced := make(chan error)
crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}
i := 1
LOOP:
for {
// fmt.Printf("====== LOOP %d\n", i)
t.Logf("====== LOOP %d\n", i)
// create consensus state from a clean slate
@ -142,6 +153,7 @@ LOOP:
// clean up WAL file from the previous iteration
walFile := cs.config.WalFile()
ensureDir(filepath.Dir(walFile), 0700)
os.Remove(walFile)
// set crashing WAL
@ -163,7 +175,7 @@ LOOP:
t.Logf("WAL paniced: %v", err)
// make sure we can make blocks after a crash
startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
// stop consensus state and transactions sender (initFn)
cs.Stop()
@ -269,29 +281,37 @@ var modes = []uint{0, 1, 2}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 0, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 0, m)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 1, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 1, m)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS-1, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS, m)
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS, m)
}
}
@ -311,10 +331,8 @@ func tempWALWithData(data []byte) string {
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := ResetConfig("proxy_test_")
walBody, err := WALWithNBlocks(NUM_BLOCKS)
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
@ -631,6 +649,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
clientCreator := proxy.NewLocalClientCreator(app)
config := ResetConfig("proxy_test_")
defer os.RemoveAll(config.RootDir)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)


consensus/state_test.go (+0, -4)

@ -18,10 +18,6 @@ import (
"github.com/tendermint/tendermint/types"
)
func init() {
config = ResetConfig("consensus_state_test")
}
/*
ProposeSuite


consensus/types/height_vote_set_test.go (+5, -1)

@ -2,6 +2,7 @@ package types
import (
"fmt"
"os"
"testing"
cfg "github.com/tendermint/tendermint/config"
@ -11,8 +12,11 @@ import (
var config *cfg.Config // NOTE: must be reset for each _test.go file
func init() {
func TestMain(m *testing.M) {
config = cfg.ResetTestRoot("consensus_height_vote_set_test")
code := m.Run()
os.RemoveAll(config.RootDir)
os.Exit(code)
}
func TestPeerCatchupRounds(t *testing.T) {


consensus/wal_generator.go (+8, -20)

@ -7,7 +7,7 @@ import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
@ -28,8 +28,9 @@ import (
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
config := getConfig()
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
defer os.RemoveAll(config.RootDir)
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
@ -102,11 +103,11 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
}
//WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
return []byte{}, err
}
@ -114,18 +115,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
return b.Bytes(), nil
}
// f**ing long, but unique for each test
func makePathname() string {
// get path
p, err := os.Getwd()
if err != nil {
panic(err)
}
// fmt.Println(p)
sep := string(filepath.Separator)
return strings.Replace(p, sep, "_", -1)
}
func randPort() int {
// returns between base and base + spread
base, spread := 20000, 20000
@ -140,9 +129,8 @@ func makeAddrs() (string, string, string) {
}
// getConfig returns a config for test cases
func getConfig() *cfg.Config {
pathname := makePathname()
c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
func getConfig(t *testing.T) *cfg.Config {
c := cfg.ResetTestRoot(t.Name())
// and we use random ports to run in parallel
tm, rpc, grpc := makeAddrs()


consensus/wal_test.go (+2, -2)

@ -48,7 +48,7 @@ func TestWALTruncate(t *testing.T) {
//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
//at this time, RotateFile is called, truncate content exist in each file.
err = WALGenerateNBlocks(wal.Group(), 60)
err = WALGenerateNBlocks(t, wal.Group(), 60)
require.NoError(t, err)
time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run
@ -116,7 +116,7 @@ func TestWALWritePanicsIfMsgIsTooBig(t *testing.T) {
}
func TestWALSearchForEndHeight(t *testing.T) {
walBody, err := WALWithNBlocks(6)
walBody, err := WALWithNBlocks(t, 6)
if err != nil {
t.Fatal(err)
}


lite/client/provider_test.go (+3, -1)

@ -15,7 +15,8 @@ import (
func TestMain(m *testing.M) {
app := kvstore.NewKVStoreApplication()
node := rpctest.StartTendermint(app)
node, cleanup := rpctest.StartTendermint(app)
defer cleanup()
code := m.Run()
@ -28,6 +29,7 @@ func TestProvider(t *testing.T) {
assert, require := assert.New(t), require.New(t)
cfg := rpctest.GetConfig()
defer os.RemoveAll(cfg.RootDir)
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {


lite/proxy/query_test.go (+3, -1)

@ -27,13 +27,15 @@ var waitForEventTimeout = 5 * time.Second
// TODO fix tests!!
func TestMain(m *testing.M) {
var cleanup func()
app := kvstore.NewKVStoreApplication()
node = rpctest.StartTendermint(app)
node, cleanup = rpctest.StartTendermint(app)
code := m.Run()
node.Stop()
node.Wait()
cleanup()
os.Exit(code)
}


mempool/bench_test.go (+2, -1)

@ -11,7 +11,8 @@ import (
func BenchmarkReap(b *testing.B) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
size := 10000
for i := 0; i < size; i++ {


mempool/mempool_test.go (+16, -8)

@ -25,7 +25,7 @@ import (
"github.com/tendermint/tendermint/types"
)
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, func()) {
config := cfg.ResetTestRoot("mempool_test")
appConnMem, _ := cc.NewABCIClient()
@ -36,7 +36,7 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
}
mempool := NewMempool(config.Mempool, appConnMem, 0)
mempool.SetLogger(log.TestingLogger())
return mempool
return mempool, func() { os.RemoveAll(config.RootDir) }
}
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@ -82,7 +82,8 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
func TestReapMaxBytesMaxGas(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
// Ensure gas calculation behaves as expected
checkTxs(t, mempool, 1)
@ -130,7 +131,8 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
func TestMempoolFilters(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
emptyTxArr := []types.Tx{[]byte{}}
nopPreFilter := func(tx types.Tx) error { return nil }
@ -168,7 +170,8 @@ func TestMempoolFilters(t *testing.T) {
func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
err := mempool.CheckTx([]byte{0x01}, nil)
if assert.Error(t, err) {
@ -179,7 +182,8 @@ func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
func TestTxsAvailable(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool.EnableTxsAvailable()
timeoutMS := 500
@ -224,7 +228,9 @@ func TestSerialReap(t *testing.T) {
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
err := appConnCon.Start()
@ -364,6 +370,7 @@ func TestMempoolCloseWAL(t *testing.T) {
// 3. Create the mempool
wcfg := cfg.DefaultMempoolConfig()
wcfg.RootDir = rootDir
defer os.RemoveAll(wcfg.RootDir)
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
@ -406,7 +413,8 @@ func txMessageSize(tx types.Tx) int {
func TestMempoolMaxMsgSize(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempl := newMempoolWithApp(cc)
mempl, cleanup := newMempoolWithApp(cc)
defer cleanup()
testCases := []struct {
len int


mempool/reactor_test.go (+2, -1)

@ -49,7 +49,8 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
for i := 0; i < N; i++ {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states
reactors[i].SetLogger(logger.With("validator", i))


node/node_test.go (+7, -0)

@ -31,6 +31,7 @@ import (
func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
defer os.RemoveAll(config.RootDir)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@ -90,6 +91,7 @@ func TestSplitAndTrimEmpty(t *testing.T) {
func TestNodeDelayedStart(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_start_test")
defer os.RemoveAll(config.RootDir)
now := tmtime.Now()
// create & start node
@ -104,6 +106,7 @@ func TestNodeDelayedStart(t *testing.T) {
func TestNodeSetAppVersion(t *testing.T) {
config := cfg.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(config.RootDir)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@ -124,6 +127,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
addr := "tcp://" + testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = addr
dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
@ -153,6 +157,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
addrNoPrefix := testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix
_, err := DefaultNewNode(config, log.TestingLogger())
@ -164,6 +169,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
defer os.Remove(tmpfile) // clean up
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile
dialer := privval.DialUnixFn(tmpfile)
@ -200,6 +206,7 @@ func testFreeAddr(t *testing.T) string {
// mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()


rpc/client/main_test.go (+3, -1)

@ -13,12 +13,14 @@ var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (and kvstore) in the background to test against
var cleanup func()
app := kvstore.NewKVStoreApplication()
node = rpctest.StartTendermint(app)
node, cleanup = rpctest.StartTendermint(app)
code := m.Run()
// and shut down proper at the end
node.Stop()
node.Wait()
cleanup()
os.Exit(code)
}

rpc/grpc/grpc_test.go (+2, -1)

@ -15,12 +15,13 @@ import (
func TestMain(m *testing.M) {
// start a tendermint node in the background to test against
app := kvstore.NewKVStoreApplication()
node := rpctest.StartTendermint(app)
node, cleanup := rpctest.StartTendermint(app)
code := m.Run()
// and shut down proper at the end
node.Stop()
node.Wait()
cleanup()
os.Exit(code)
}


rpc/test/helpers.go (+5, -5)

@ -100,8 +100,8 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient {
}
// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
func StartTendermint(app abci.Application) *nm.Node {
node := NewTendermint(app)
func StartTendermint(app abci.Application) (*nm.Node, func()) {
node, cleanup := NewTendermint(app)
err := node.Start()
if err != nil {
panic(err)
@ -113,11 +113,11 @@ func StartTendermint(app abci.Application) *nm.Node {
fmt.Println("Tendermint running!")
return node
return node, cleanup
}
// NewTendermint creates a new tendermint server and sleeps forever
func NewTendermint(app abci.Application) *nm.Node {
func NewTendermint(app abci.Application) (*nm.Node, func()) {
// Create & start node
config := GetConfig()
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
@ -138,5 +138,5 @@ func NewTendermint(app abci.Application) *nm.Node {
if err != nil {
panic(err)
}
return node
return node, func() { os.RemoveAll(config.RootDir) }
}

state/state_test.go (+4, -3)

@ -5,6 +5,7 @@ import (
"fmt"
"math"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
@ -28,7 +29,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) {
state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
tearDown := func(t *testing.T) {}
tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
return tearDown, stateDB, state
}
@ -802,10 +803,10 @@ func TestLargeGenesisValidator(t *testing.T) {
func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
const valSetSize = 2
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
defer tearDown(t)
nextHeight := state.LastBlockHeight + 1
@ -825,11 +826,11 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
func TestManyValidatorChangesSaveLoad(t *testing.T) {
const valSetSize = 7
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
defer tearDown(t)
_, valOld := state.Validators.GetByIndex(0)
var pubkeyOld = valOld.PubKey

