@@ -8,6 +8,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"runtime"
 	"testing"
 	"time"
@@ -30,10 +31,19 @@ import (
 	"github.com/tendermint/tendermint/types"
 )

 var consensusReplayConfig *cfg.Config

-func init() {
+func TestMain(m *testing.M) {
 	config = ResetConfig("consensus_reactor_test")
 	consensusReplayConfig = ResetConfig("consensus_replay_test")
+	configStateTest := ResetConfig("consensus_state_test")
+	configMempoolTest := ResetConfig("consensus_mempool_test")
+	configByzantineTest := ResetConfig("consensus_byzantine_test")
+	code := m.Run()
+	os.RemoveAll(config.RootDir)
+	os.RemoveAll(consensusReplayConfig.RootDir)
+	os.RemoveAll(configStateTest.RootDir)
+	os.RemoveAll(configMempoolTest.RootDir)
+	os.RemoveAll(configByzantineTest.RootDir)
+	os.Exit(code)
 }

 // These tests ensure we can always recover from failure at any part of the consensus process.
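// A note on the TestMain pattern above: m.Run() executes every test in the
// package and returns an exit code, and os.Exit terminates the process
// without running deferred calls, which is why the os.RemoveAll cleanup is
// written out explicitly before os.Exit(code). A minimal, self-contained
// sketch of the same idiom (hypothetical package and fixture, stdlib only):

package example

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	dir, err := ioutil.TempDir("", "example_test") // fixture shared by all tests
	if err != nil {
		panic(err) // no *testing.T exists yet in TestMain
	}
	code := m.Run()   // run all Test* functions in the package
	os.RemoveAll(dir) // explicit cleanup: a defer here would be skipped by os.Exit
	os.Exit(code)     // report the tests' status to `go test`
}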
@@ -51,7 +61,8 @@ func init() {
 // and which ones we need the wal for - then we'd also be able to only flush the
 // wal writer when we need to, instead of with every message.

-func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
+func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
+	lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
 	logger := log.TestingLogger()
 	state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
 	privValidator := loadPrivValidator(consensusReplayConfig)
@@ -59,7 +70,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
 	cs.SetLogger(logger)

 	bytes, _ := ioutil.ReadFile(cs.config.WalFile())
-	// fmt.Printf("====== WAL: \n\r%s\n", bytes)
 	t.Logf("====== WAL: \n\r%X\n", bytes)
 	err := cs.Start()
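// Why t.Logf rather than the stale commented-out fmt.Printf being deleted
// above: output written through the *testing.T is buffered per test, shown
// only on failure or under `go test -v`, and attributed to the right test
// when tests run in parallel. A tiny self-contained illustration:

package example

import "testing"

func TestWALDump(t *testing.T) {
	bytes := []byte{0xDE, 0xAD, 0xBE, 0xEF} // stand-in for the WAL contents read above
	t.Logf("====== WAL: \n\r%X\n", bytes)   // %X hex-dumps the raw WAL bytes
}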
@@ -110,21 +120,22 @@ func TestWALCrash(t *testing.T) {
 			3},
 	}

-	for _, tc := range testCases {
+	for i, tc := range testCases {
+		consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
 		t.Run(tc.name, func(t *testing.T) {
-			crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
+			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
 		})
 	}
 }

-func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
+func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
+	initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
 	walPaniced := make(chan error)
 	crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}

 	i := 1
 LOOP:
 	for {
-		// fmt.Printf("====== LOOP %d\n", i)
 		t.Logf("====== LOOP %d\n", i)

 		// create consensus state from a clean slate
@@ -142,6 +153,7 @@ LOOP:
 		// clean up WAL file from the previous iteration
 		walFile := cs.config.WalFile()
+		ensureDir(filepath.Dir(walFile), 0700)
 		os.Remove(walFile)

 		// set crashing WAL
@@ -163,7 +175,7 @@
 			t.Logf("WAL paniced: %v", err)

 			// make sure we can make blocks after a crash
-			startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
+			startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

 			// stop consensus state and transactions sender (initFn)
 			cs.Stop()
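// crashingWAL (defined elsewhere in this file) is a WAL wrapper that
// deliberately panics once writes reach heightToStop and reports the
// failure over panicCh; the test waits on that channel, restarts consensus
// from the persisted data, and checks that blocks are still produced. A
// self-contained sketch of that crash-and-report pattern, with
// illustrative names only (not the real WAL interface):

package example

import (
	"fmt"
	"testing"
)

type crasher struct {
	panicCh      chan error
	heightToStop int64
}

// Write reports on panicCh and panics once the stop height is reached.
func (c *crasher) Write(height int64) {
	if height >= c.heightToStop {
		c.panicCh <- fmt.Errorf("reached height %d", height)
		panic("crashing WAL on purpose")
	}
	// below the stop height, a real wrapper would forward to the inner WAL
}

func TestCrashAndRecover(t *testing.T) {
	c := &crasher{panicCh: make(chan error), heightToStop: 2}
	go func() {
		defer func() { recover() }() // contain the deliberate panic
		for h := int64(1); ; h++ {
			c.Write(h)
		}
	}()
	err := <-c.panicCh // block until the wrapped component has crashed
	t.Logf("WAL paniced: %v", err)
	// ... here the real test restarts consensus and waits for a new block
}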
@@ -269,29 +281,37 @@ var modes = []uint{0, 1, 2}

 // Sync from scratch
 func TestHandshakeReplayAll(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, 0, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, 0, m)
 	}
 }

 // Sync many, not from scratch
 func TestHandshakeReplaySome(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, 1, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, 1, m)
 	}
 }

 // Sync from lagging by one
 func TestHandshakeReplayOne(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, NUM_BLOCKS-1, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
 	}
 }

 // Sync from caught up
 func TestHandshakeReplayNone(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, NUM_BLOCKS, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, NUM_BLOCKS, m)
 	}
 }
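// One subtlety in the four tests above: defer inside a loop runs when the
// enclosing test function returns, not at the end of each iteration, so
// each test accumulates len(modes) pending RemoveAll calls. With three
// modes that is harmless; for long loops the usual alternative is an
// immediate cleanup, sketched here with stdlib helpers only:

package example

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestManyIterations(t *testing.T) {
	for i := 0; i < 1000; i++ {
		dir, err := ioutil.TempDir("", "iter")
		if err != nil {
			t.Fatal(err)
		}
		// ... exercise the code under test inside dir ...
		os.RemoveAll(dir) // clean up now instead of stacking 1000 defers
	}
}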
@@ -311,10 +331,8 @@ func tempWALWithData(data []byte) string {
 }

 // Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
-func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
-	config := ResetConfig("proxy_test_")
-
-	walBody, err := WALWithNBlocks(NUM_BLOCKS)
+func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
+	walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
 	require.NoError(t, err)
 	walFile := tempWALWithData(walBody)
 	config.Consensus.SetWalFile(walFile)
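// tempWALWithData, named in the hunk header above, writes the generated WAL
// bytes to a throwaway file whose path is then passed to
// config.Consensus.SetWalFile. A plausible stdlib-only shape for such a
// helper (the actual implementation in this file may differ):

package example

import (
	"fmt"
	"io/ioutil"
)

func tempWALWithData(data []byte) string {
	walFile, err := ioutil.TempFile("", "wal")
	if err != nil {
		panic(fmt.Errorf("failed to create temp WAL file: %v", err))
	}
	if _, err := walFile.Write(data); err != nil {
		panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Errorf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}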
@@ -631,6 +649,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
 	clientCreator := proxy.NewLocalClientCreator(app)

 	config := ResetConfig("proxy_test_")
+	defer os.RemoveAll(config.RootDir)
 	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
 	stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)