Browse Source

consensus: remove crankTimeoutPropose from tests

pull/342/head
Ethan Buchman 8 years ago
parent
commit
e5fb681615
3 changed files with 22 additions and 19 deletions
  1. +11
    -2
      consensus/byzantine_test.go
  2. +2
    -14
      consensus/common_test.go
  3. +9
    -3
      consensus/reactor_test.go

+ 11
- 2
consensus/byzantine_test.go View File

@@ -29,7 +29,7 @@ func init() {
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_byzantine_test", crankTimeoutPropose, newMockTickerFunc(false))
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false))
// give the byzantine validator a normal ticker
css[0].SetTimeoutTicker(NewTimeoutTicker())
@@ -60,7 +60,7 @@ func TestByzantine(t *testing.T) {
}
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
conR := NewConsensusReactor(css[i], false)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR.SetEventSwitch(eventSwitch)
var conRI p2p.Reactor
@@ -83,6 +83,15 @@ func TestByzantine(t *testing.T) {
p2p.Connect2Switches(sws, i, j)
})
// start the state machines
byzR := reactors[0].(*ByzantineReactor)
s := byzR.reactor.conS.GetState()
byzR.reactor.SwitchToConsensus(s)
for i := 1; i < N; i++ {
cr := reactors[i].(*ConsensusReactor)
cr.SwitchToConsensus(cr.conS.GetState())
}
// byz proposer sends one block to peers[0]
// and the other block to peers[1] and peers[2].
// note peers and switches order don't match.


+ 2
- 14
consensus/common_test.go View File

@@ -12,7 +12,6 @@ import (
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-logger"
"github.com/tendermint/go-p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/config/tendermint_test"
@@ -257,7 +256,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
return cs, vss
}
func randConsensusNet(nValidators int, testName string, updateConfig func(cfg.Config), tickerFunc func() TimeoutTicker) []*ConsensusState {
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
for i := 0; i < nValidators; i++ {
@@ -265,7 +264,6 @@ func randConsensusNet(nValidators int, testName string, updateConfig func(cfg.Co
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
updateConfig(thisConfig)
EnsureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], counter.NewCounterApplication(true))
css[i].SetTimeoutTicker(tickerFunc())
@@ -274,7 +272,7 @@ func randConsensusNet(nValidators int, testName string, updateConfig func(cfg.Co
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, updateConfig func(cfg.Config), tickerFunc func() TimeoutTicker) []*ConsensusState {
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
css := make([]*ConsensusState, nPeers)
for i := 0; i < nPeers; i++ {
@@ -282,7 +280,6 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, updateC
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
updateConfig(thisConfig)
EnsureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
@@ -373,15 +370,6 @@ func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
return -1
}
// so we dont violate synchrony assumptions
// TODO: make tests more robust to this instead (handle round changes)
// XXX: especially a problem when running the race detector on circle
func crankTimeoutPropose(config cfg.Config) {
logger.SetLogLevel("info")
config.Set("timeout_propose", 110000) // TODO: crank it to eleventy
config.Set("timeout_commit", 1000)
}
//------------------------------------
func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {


+ 9
- 3
consensus/reactor_test.go View File

@@ -24,11 +24,11 @@ func init() {
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", crankTimeoutPropose, newMockTickerFunc(true))
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true))
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
reactors[i] = NewConsensusReactor(css[i], false)
reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
@@ -45,6 +45,12 @@ func TestReactor(t *testing.T) {
return s
}, p2p.Connect2Switches)
// start the state machines
for i := 0; i < N; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s)
}
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
@@ -58,7 +64,7 @@ func TestReactor(t *testing.T) {
func TestValidatorSetChanges(t *testing.T) {
nPeers := 8
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", crankTimeoutPropose, newMockTickerFunc(true))
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true))
reactors := make([]*ConsensusReactor, nPeers)
eventChans := make([]chan interface{}, nPeers)
for i := 0; i < nPeers; i++ {


Loading…
Cancel
Save