
Merge pull request #840 from tendermint/fix/tests

Fix/tests
Ethan Buchman 7 years ago
committed by GitHub
commit 3db44dacae
4 changed files with 34 additions and 17 deletions
  1. certifiers/client/provider.go (+5 -0)
  2. certifiers/client/provider_test.go (+6 -7)
  3. consensus/reactor.go (+2 -2)
  4. consensus/reactor_test.go (+21 -8)

certifiers/client/provider.go (+5 -0)

@@ -40,6 +40,11 @@ func NewHTTPProvider(remote string) certifiers.Provider {
 	}
 }
 
+// StatusClient returns the internal node as a StatusClient
+func (p *provider) StatusClient() rpcclient.StatusClient {
+	return p.node
+}
+
 // StoreCommit is a noop, as clients can only read from the chain...
 func (p *provider) StoreCommit(_ certifiers.FullCommit) error { return nil }
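
The new accessor gives callers a sanctioned way to reach the node's RPC status. A minimal sketch of a helper built on it, assuming the rpc/client StatusClient interface (a single Status() method) and the ResultStatus field layout of this era; the latestHeight name is illustrative, not part of the commit:

package client

import (
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// latestHeight asks any StatusClient (e.g. the value returned by
// (*provider).StatusClient) for the chain's current block height.
func latestHeight(sc rpcclient.StatusClient) (int, error) {
	status, err := sc.Status()
	if err != nil {
		return 0, err
	}
	// LatestBlockHeight is assumed to be a top-level ResultStatus field here.
	return status.LatestBlockHeight, nil
}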


certifiers/client/provider_test.go (+6 -7)

@@ -1,17 +1,15 @@
-package client_test
+package client
 
 import (
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	rpctest "github.com/tendermint/tendermint/rpc/test"
-
 	"github.com/tendermint/tendermint/certifiers"
-	"github.com/tendermint/tendermint/certifiers/client"
 	certerr "github.com/tendermint/tendermint/certifiers/errors"
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+	rpctest "github.com/tendermint/tendermint/rpc/test"
 )
 
 func TestProvider(t *testing.T) {
@@ -20,11 +18,12 @@ func TestProvider(t *testing.T) {
 	cfg := rpctest.GetConfig()
 	rpcAddr := cfg.RPC.ListenAddress
 	chainID := cfg.ChainID
-	p := client.NewHTTPProvider(rpcAddr)
+	p := NewHTTPProvider(rpcAddr)
 	require.NotNil(t, p)
 
 	// let it produce some blocks
-	time.Sleep(500 * time.Millisecond)
+	err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil)
+	require.Nil(err)
 
 	// let's get the highest block
 	seed, err := p.LatestCommit()
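
Moving the test into package client is what makes the p.(*provider).node assertion possible. For contrast, a rough sketch of how an external (black-box) test could do the same wait through the newly exported StatusClient accessor; the statusClienter interface and test name below are illustrative only, not part of this change:

package client_test

import (
	"testing"

	"github.com/tendermint/tendermint/certifiers/client"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpctest "github.com/tendermint/tendermint/rpc/test"
)

// statusClienter captures just the accessor added in provider.go.
type statusClienter interface {
	StatusClient() rpcclient.StatusClient
}

func TestWaitViaStatusClient(t *testing.T) {
	p := client.NewHTTPProvider(rpctest.GetConfig().RPC.ListenAddress)

	// Type-assert to the accessor instead of reaching into unexported fields.
	sc, ok := p.(statusClienter)
	if !ok {
		t.Fatal("provider does not expose StatusClient")
	}
	if err := rpcclient.WaitForHeight(sc.StatusClient(), 6, nil); err != nil {
		t.Fatal(err)
	}
}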


consensus/reactor.go (+2 -2)

@@ -1055,8 +1055,8 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
 }
 
 func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
-	logger := ps.logger.With("peerRound", ps.Round, "height", height, "round", round)
-	logger.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
+	logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.Height, ps.Round), "H/R", cmn.Fmt("%d/%d", height, round))
+	logger.Debug("setHasVote", "type", type_, "index", index)
 
 	// NOTE: some may be nil BitArrays -> no side effects.
 	psVotes := ps.getVoteBitArray(height, round, type_)
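
The change only reshapes the log context: instead of a flat peerRound/height/round key set, both the peer's view and the incoming vote's position are folded into compact "H/R" strings. A small standalone sketch of the resulting call shape, assuming the tmlibs log and common packages used at the time (import paths and values here are illustrative):

package main

import (
	"os"

	cmn "github.com/tendermint/tmlibs/common"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	logger := log.NewTMLogger(os.Stdout)

	peerHeight, peerRound := 10, 0 // the peer's view (ps.Height, ps.Round)
	height, round := 10, 0         // where the vote being recorded lives

	// One Debug line now carries both views as "height/round" strings.
	l := logger.With(
		"peerH/R", cmn.Fmt("%d/%d", peerHeight, peerRound),
		"H/R", cmn.Fmt("%d/%d", height, round),
	)
	l.Debug("setHasVote", "type", byte(0x01), "index", 3)
}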


consensus/reactor_test.go (+21 -8)

@@ -3,6 +3,8 @@ package consensus
 import (
 	"context"
 	"fmt"
+	"os"
+	"runtime/pprof"
 	"sync"
 	"testing"
 	"time"
@@ -29,11 +31,16 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*Consensus
 	eventBuses := make([]*types.EventBus, N)
 	logger := consensusLogger()
 	for i := 0; i < N; i++ {
+		/*thisLogger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
+		if err != nil { t.Fatal(err)}*/
+		thisLogger := logger
+
 		reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
-		reactors[i].SetLogger(logger.With("validator", i))
+		reactors[i].conS.SetLogger(thisLogger.With("validator", i))
+		reactors[i].SetLogger(thisLogger.With("validator", i))
 		eventBuses[i] = types.NewEventBus()
-		eventBuses[i].SetLogger(logger.With("module", "events", "validator", i))
+		eventBuses[i].SetLogger(thisLogger.With("module", "events", "validator", i))
 		_, err := eventBuses[i].Start()
 		require.NoError(t, err)
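
The commented-out lines hint at how per-module log filtering could be switched on while debugging these tests. A hedged, standalone sketch of that technique, assuming the tmlibs/cli/flags ParseLogLevel helper of this era (the filter string mirrors the comment; everything else is illustrative):

package main

import (
	"os"

	tmflags "github.com/tendermint/tmlibs/cli/flags"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	base := log.NewTMLogger(os.Stdout)

	// "consensus:info,*:error": info and above for the consensus module,
	// errors only for everything else.
	logger, err := tmflags.ParseLogLevel("consensus:info,*:error", base, "info")
	if err != nil {
		panic(err)
	}

	logger.With("module", "consensus").Info("kept")
	logger.With("module", "p2p").Info("filtered out")
}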
@@ -52,6 +59,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*Consensus
 	// now that everyone is connected, start the state machines
 	// If we started the state machines before everyone was connected,
 	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
+	// TODO: is this still true with new pubsub?
 	for i := 0; i < N; i++ {
 		s := reactors[i].conS.GetState()
 		reactors[i].SwitchToConsensus(s, 0)
@@ -304,7 +312,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
 	}, css)
 }
 
-func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
+func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) {
 	timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
 		var newBlock *types.Block
 	LOOP:
@@ -355,15 +363,20 @@ func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*
 		close(done)
 	}()
 
+	// we're running many nodes in-process, possibly in a virtual machine,
+	// and spewing debug messages - making a block could take a while,
+	timeout := time.Second * 60
+
 	select {
 	case <-done:
-	case <-time.After(time.Second * 10):
+	case <-time.After(timeout):
 		for i, cs := range css {
-			fmt.Println("#################")
-			fmt.Println("Validator", i)
-			fmt.Println(cs.GetRoundState())
-			fmt.Println("")
+			t.Log("#################")
+			t.Log("Validator", i)
+			t.Log(cs.GetRoundState())
+			t.Log("")
 		}
+		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
 		panic("Timed out waiting for all validators to commit a block")
 	}
 }
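
The timeout handling boils down to a reusable pattern: wait on a done channel and, if the deadline passes first, dump every goroutine's stack before panicking so hung validators can be diagnosed. A minimal standalone sketch of that pattern (names are illustrative, not from the diff):

package main

import (
	"os"
	"runtime/pprof"
	"time"
)

// waitOrDump blocks until done is closed; on timeout it writes one stack
// trace per live goroutine (debug level 1) to stdout and panics.
func waitOrDump(done <-chan struct{}, timeout time.Duration) {
	select {
	case <-done:
	case <-time.After(timeout):
		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		panic("timed out waiting for goroutines to finish")
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()
	waitOrDump(done, time.Second)
}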
