
Merge pull request #330 from tendermint/fix_tests

Fix tests
Ethan Buchman, 8 years ago (committed by GitHub), commit 0204d3c6a6
19 changed files with 235 additions and 188 deletions
  1. blockchain/reactor.go (+7, -12)
  2. circle.yml (+1, -1)
  3. consensus/byzantine_test.go (+1, -2)
  4. consensus/common_test.go (+16, -4)
  5. consensus/reactor_test.go (+6, -19)
  6. state/execution.go (+5, -5)
  7. state/execution_test.go (+1, -1)
  8. test/app/counter_test.sh (+32, -38)
  9. test/app/test.sh (+5, -5)
  10. test/p2p/atomic_broadcast/test.sh (+5, -36)
  11. test/p2p/basic/test.sh (+53, -0)
  12. test/p2p/fast_sync/check_peer.sh (+43, -0)
  13. test/p2p/fast_sync/test.sh (+7, -36)
  14. test/p2p/fast_sync/test_peer.sh (+37, -0)
  15. test/p2p/local_testnet.sh (+1, -1)
  16. test/p2p/test.sh (+11, -26)
  17. test/persist/test.sh (+2, -0)
  18. test/test_cover.sh (+1, -1)
  19. types/vote_set.go (+1, -1)

blockchain/reactor.go (+7, -12)

@@ -235,23 +235,18 @@ FOR_LOOP:
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
// TODO: use ApplyBlock instead of Exec/Commit/SetAppHash/Save
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: should we be firing events? need to fire NewBlock events manually ...
err := bcR.state.ExecBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader)
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
res := bcR.proxyAppConn.CommitSync()
if res.IsErr() {
// TODO Handle gracefully.
PanicQ(Fmt("Failed to commit block at application: %v", res))
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, sm.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state.AppHash = res.Data
bcR.state.Save()
}
}
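
In plain terms, the diff above swaps the reactor's hand-rolled ExecBlock / CommitSync / SaveBlock / SetAppHash / Save sequence for a single state.ApplyBlock call plus a no-op mempool. A minimal, self-contained Go sketch of that shape, with every type here a local stand-in rather than the real tendermint one:

package main

import "fmt"

// Local stand-ins, for illustration only; not the real tendermint types.
type Block struct{ Height int }

type AppConn struct{}

func (a *AppConn) CommitSync() []byte { return []byte{0xDE, 0xAD} } // pretend app hash

type MockMempool struct{}

type State struct{ AppHash []byte }

// ApplyBlock bundles what the reactor used to do by hand:
// exec the block against the app, commit, and persist the new state.
func (s *State) ApplyBlock(app *AppConn, b *Block, mem MockMempool) error {
	// exec txs elided
	s.AppHash = app.CommitSync()
	// save state elided
	return nil
}

func main() {
	s, app, b := &State{}, &AppConn{}, &Block{Height: 1}
	if err := s.ApplyBlock(app, b, MockMempool{}); err != nil {
		// mirrors the reactor's PanicQ path: a failure here means we may be a zombie
		panic(fmt.Sprintf("Failed to process committed block (%d): %v", b.Height, err))
	}
	fmt.Printf("applied block %d, app hash %X\n", b.Height, s.AppHash)
}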


circle.yml (+1, -1)

@@ -30,7 +30,7 @@ dependencies:
test:
override:
- "cd $REPO && make test_integrations":
timeout: 1200
timeout: 1800
post:
- "cd $REPO && bash <(curl -s https://codecov.io/bash)"


consensus/byzantine_test.go (+1, -2)

@@ -29,9 +29,8 @@ func init() {
// Byzantine validator refuses to prevote.
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
resetConfigTimeouts()
N := 4
css := randConsensusNet(N)
css := randConsensusNet(N, "consensus_byzantine_test", crankTimeoutPropose)
switches := make([]*p2p.Switch, N)
for i := 0; i < N; i++ {


consensus/common_test.go (+16, -4)

@@ -12,6 +12,7 @@ import (
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-logger"
"github.com/tendermint/go-p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/config/tendermint_test"
@@ -256,14 +257,15 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
return cs, vss
}
func randConsensusNet(nValidators int) []*ConsensusState {
func randConsensusNet(nValidators int, testName string, updateConfig func(cfg.Config)) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("consensus_reactor_test_%d", i))
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
updateConfig(thisConfig)
EnsureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], counter.NewCounterApplication(true))
}
@@ -271,14 +273,15 @@ func randConsensusNet(nValidators int) []*ConsensusState {
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators int, nPeers int) []*ConsensusState {
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, updateConfig func(cfg.Config)) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
css := make([]*ConsensusState, nPeers)
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("consensus_reactor_test_%d", i))
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
updateConfig(thisConfig)
EnsureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
@@ -367,3 +370,12 @@ func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
panic("didnt find peer in switches")
return -1
}
// so we dont violate synchrony assumptions
// TODO: make tests more robust to this instead (handle round changes)
// XXX: especially a problem when running the race detector on circle
func crankTimeoutPropose(config cfg.Config) {
logger.SetLogLevel("info")
config.Set("timeout_propose", 110000) // TODO: crank it to eleventy
config.Set("timeout_commit", 1000)
}
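
The new signature means each test names its own config directory and hands in a callback that tweaks every node's config the same way. A standalone sketch of just that wiring (Config here is a local map stand-in for cfg.Config; only the "%s_%d" naming and the updateConfig callback mirror the diff):

package main

import "fmt"

// Config is a local map stand-in for cfg.Config, just enough for the sketch.
type Config map[string]interface{}

func (c Config) Set(key string, value interface{}) { c[key] = value }

// crankTimeoutPropose mirrors the helper added above: tests pass it in so
// every node gets the same timeout tweaks (long propose, short commit).
func crankTimeoutPropose(config Config) {
	config.Set("timeout_propose", 110000)
	config.Set("timeout_commit", 1000)
}

// randConsensusNetConfigs sketches only the config-building half of
// randConsensusNet(nValidators, testName, updateConfig): build a per-node
// config named after the test, then let the callback mutate it.
func randConsensusNetConfigs(nValidators int, testName string, updateConfig func(Config)) []Config {
	configs := make([]Config, nValidators)
	for i := 0; i < nValidators; i++ {
		thisConfig := Config{"cs_wal_dir": fmt.Sprintf("%s_%d/wal", testName, i)}
		updateConfig(thisConfig)
		configs[i] = thisConfig
	}
	return configs
}

func main() {
	for _, c := range randConsensusNetConfigs(4, "consensus_reactor_test", crankTimeoutPropose) {
		fmt.Println(c["cs_wal_dir"], c["timeout_propose"], c["timeout_commit"])
	}
}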

consensus/reactor_test.go (+6, -19)

@@ -9,7 +9,6 @@ import (
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/go-events"
"github.com/tendermint/go-logger"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmsp/example/dummy"
@@ -19,26 +18,13 @@ func init() {
config = tendermint_test.ResetConfig("consensus_reactor_test")
}
func resetConfigTimeouts() {
logger.SetLogLevel("info")
//config.Set("log_level", "notice")
config.Set("timeout_propose", 2000)
// config.Set("timeout_propose_delta", 500)
// config.Set("timeout_prevote", 1000)
// config.Set("timeout_prevote_delta", 500)
// config.Set("timeout_precommit", 1000)
// config.Set("timeout_precommit_delta", 500)
config.Set("timeout_commit", 1000)
}
//----------------------------------------------
// in-process testnets
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
resetConfigTimeouts()
N := 4
css := randConsensusNet(N)
css := randConsensusNet(N, "consensus_reactor_test", crankTimeoutPropose)
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
@@ -70,10 +56,9 @@ func TestReactor(t *testing.T) {
// ensure we can make blocks despite cycling a validator set
func TestValidatorSetChanges(t *testing.T) {
resetConfigTimeouts()
nPeers := 8
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers)
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", crankTimeoutPropose)
reactors := make([]*ConsensusReactor, nPeers)
eventChans := make([]chan interface{}, nPeers)
for i := 0; i < nPeers; i++ {
@@ -134,8 +119,10 @@ func TestValidatorSetChanges(t *testing.T) {
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlock := <-eventChans[j]
err := validateBlock(newBlock.(types.EventDataNewBlock).Block, activeVals)
newBlockI := <-eventChans[j]
newBlock := newBlockI.(types.EventDataNewBlock).Block
log.Info("Got block", "height", newBlock.Height, "validator", j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
}


state/execution.go (+5, -5)

@@ -280,12 +280,12 @@ type Mempool interface {
Update(height int, txs []types.Tx)
}
type mockMempool struct {
type MockMempool struct {
}
func (m mockMempool) Lock() {}
func (m mockMempool) Unlock() {}
func (m mockMempool) Update(height int, txs []types.Tx) {}
func (m MockMempool) Lock() {}
func (m MockMempool) Unlock() {}
func (m MockMempool) Update(height int, txs []types.Tx) {}
//----------------------------------------------------------------
// Handshake with app to sync to latest state of core by replaying blocks
@@ -386,7 +386,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnCon
var eventCache types.Fireable // nil
// replay the block against the actual tendermint state
return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, mockMempool{})
return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, MockMempool{})
} else {
// either we're caught up or there's blocks to replay
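
The rename from mockMempool to MockMempool is what lets code outside the state package, such as the blockchain reactor above, pass sm.MockMempool{} into ApplyBlock. A self-contained sketch of the interface/no-op pair (Tx and the Mempool interface are re-declared locally, mirroring the lines above, so this compiles on its own):

package main

import "fmt"

// Tx and Mempool are local re-declarations (mirroring types.Tx and the
// interface in state/execution.go above), just so the sketch is standalone.
type Tx []byte

type Mempool interface {
	Lock()
	Unlock()
	Update(height int, txs []Tx)
}

// MockMempool is the exported no-op implementation: fast sync and handshake
// replay have no real mempool to lock or update, but ApplyBlock wants one.
type MockMempool struct{}

func (m MockMempool) Lock()                       {}
func (m MockMempool) Unlock()                     {}
func (m MockMempool) Update(height int, txs []Tx) {}

func main() {
	var mem Mempool = MockMempool{} // compile-time proof the mock satisfies the interface
	mem.Update(1, []Tx{Tx("abc=1")})
	fmt.Println("MockMempool satisfies Mempool")
}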


state/execution_test.go (+1, -1)

@@ -20,7 +20,7 @@ var (
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("handshake_test"))
chainID = "handshake_chain"
nBlocks = 5
mempool = mockMempool{}
mempool = MockMempool{}
testPartSize = 65536
)


test/app/counter_test.sh (+32, -38)

@@ -1,4 +1,9 @@
#! /bin/bash
if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
GRPC_BROADCAST_TX=""
fi
set -u
#####################
@@ -25,34 +30,40 @@ function sendTx() {
TX=$1
if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
RESPONSE=`curl -s localhost:46657/broadcast_tx_commit?tx=\"$TX\"`
CODE=`echo $RESPONSE | jq .result[1].code`
ERROR=`echo $RESPONSE | jq .error`
ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
RESPONSE=`echo $RESPONSE | jq .result[1]`
else
if [ ! -f grpc_client ]; then
go build -o grpc_client grpc_client.go
fi
RESPONSE=`./grpc_client $TX`
echo $RESPONSE | jq . &> /dev/null
IS_JSON=$?
if [[ "$IS_JSON" != "0" ]]; then
ERROR="$RESPONSE"
else
ERROR="" # reset
fi
APPEND_TX_RESPONSE=`echo $RESPONSE | jq .append_tx`
APPEND_TX_CODE=`getCode "$APPEND_TX_RESPONSE"`
CHECK_TX_RESPONSE=`echo $RESPONSE | jq .check_tx`
CHECK_TX_CODE=`getCode "$CHECK_TX_RESPONSE"`
echo "-------"
echo "TX $TX"
echo "RESPONSE $RESPONSE"
echo "CHECK_TX_RESPONSE $CHECK_TX_RESPONSE"
echo "APPEND_TX_RESPONSE $APPEND_TX_RESPONSE"
echo "CHECK_TX_CODE $CHECK_TX_CODE"
echo "APPEND_TX_CODE $APPEND_TX_CODE"
echo "----"
ERROR=""
fi
echo "RESPONSE"
echo $RESPONSE
echo $RESPONSE | jq . &> /dev/null
IS_JSON=$?
if [[ "$IS_JSON" != "0" ]]; then
ERROR="$RESPONSE"
fi
APPEND_TX_RESPONSE=`echo $RESPONSE | jq .append_tx`
APPEND_TX_CODE=`getCode "$APPEND_TX_RESPONSE"`
CHECK_TX_RESPONSE=`echo $RESPONSE | jq .check_tx`
CHECK_TX_CODE=`getCode "$CHECK_TX_RESPONSE"`
echo "-------"
echo "TX $TX"
echo "RESPONSE $RESPONSE"
echo "ERROR $ERROR"
echo "----"
if [[ "$ERROR" != "" ]]; then
echo "Unexpected error sending tx ($TX): $ERROR"
exit 1
fi
}
@@ -66,10 +77,6 @@ if [[ $APPEND_TX_CODE != 0 ]]; then
exit 1
fi
if [[ "$GRPC_BROADCAST_TX" == "" && "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
exit 1
fi
echo "... sending tx. expect error"
@@ -80,11 +87,6 @@ if [[ "$CHECK_TX_CODE" == 0 ]]; then
echo "Got zero exit code for $TX. Expected tx to be rejected by mempool. $RESPONSE"
exit 1
fi
if [[ "$GRPC_BROADCAST_TX" == "" && "$ERROR" == "" ]]; then
echo "Expected to get an error - tx $TX should have been rejected from mempool"
echo "$RESPONSE"
exit 1
fi
echo "... sending tx. expect no error"
@@ -96,10 +98,6 @@ if [[ $APPEND_TX_CODE != 0 ]]; then
echo "Got non-zero exit code for $TX. $RESPONSE"
exit 1
fi
if [[ "$GRPC_BROADCAST_TX" == "" && "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been accepted in block. $ERROR"
exit 1
fi
echo "... sending tx. expect no error, but invalid"
@@ -114,9 +112,5 @@ if [[ $APPEND_TX_CODE == 0 ]]; then
echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE"
exit 1
fi
if [[ "$GRPC_BROADCAST_TX" == "" && "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
exit 1
fi
echo "Passed Test: $TESTNAME"

test/app/test.sh (+5, -5)

@@ -13,7 +13,7 @@ export TMROOT=$HOME/.tendermint_app
function dummy_over_socket(){
rm -rf $TMROOT
tendermint init
echo "Starting dummy and tendermint"
echo "Starting dummy_over_socket"
dummy > /dev/null &
pid_dummy=$!
tendermint node > tendermint.log &
@@ -30,7 +30,7 @@ function dummy_over_socket_reorder(){
function dummy_over_socket_reorder(){
rm -rf $TMROOT
tendermint init
echo "Starting tendermint and dummy"
echo "Starting dummy_over_socket_reorder (ie. start tendermint first)"
tendermint node > tendermint.log &
pid_tendermint=$!
sleep 2
@@ -48,7 +48,7 @@ function dummy_over_socket_reorder(){
function counter_over_socket() {
rm -rf $TMROOT
tendermint init
echo "Starting counter and tendermint"
echo "Starting counter_over_socket"
counter --serial > /dev/null &
pid_counter=$!
tendermint node > tendermint.log &
@@ -64,7 +64,7 @@ function counter_over_socket() {
function counter_over_grpc() {
rm -rf $TMROOT
tendermint init
echo "Starting counter and tendermint"
echo "Starting counter_over_grpc"
counter --serial --tmsp grpc > /dev/null &
pid_counter=$!
tendermint node --tmsp grpc > tendermint.log &
@@ -80,7 +80,7 @@ function counter_over_grpc() {
function counter_over_grpc_grpc() {
rm -rf $TMROOT
tendermint init
echo "Starting counter and tendermint"
echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)"
counter --serial --tmsp grpc > /dev/null &
pid_counter=$!
sleep 1


test/p2p/atomic_broadcast/test.sh (+5, -36)

@@ -1,52 +1,21 @@
#! /bin/bash
set -u
N=$1
###################################################################
# wait for all peers to come online
# assumes peers are already synced up
# test sending txs
# for each peer:
# wait to have 3 peers
# wait to be at height > 1
# send a tx, wait for commit
# assert app hash on every peer reflects the post tx state
###################################################################
N=4
# wait for everyone to come online
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
echo "... node $i is up"
done
echo ""
# run the test on each of them
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
# - assert everyone has 3 other peers
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
while [ "$N_PEERS" != 3 ]; do
echo "Waiting for node $i to connect to all peers ..."
sleep 1
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
done
# - assert block height is greater than 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
while [ "$BLOCK_HEIGHT" -le 1 ]; do
echo "Waiting for node $i to commit a block ..."
sleep 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
done
echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
# current state
HASH1=`curl -s $addr/status | jq .result[1].latest_app_hash`


test/p2p/basic/test.sh (+53, -0)

@@ -0,0 +1,53 @@
#! /bin/bash
set -u
N=$1
###################################################################
# wait for all peers to come online
# for each peer:
# wait to have N-1 peers
# wait to be at height > 1
###################################################################
# wait for everyone to come online
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
echo "... node $i is up"
done
echo ""
# wait for each of them to sync up
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
N_1=$(($N - 1))
# - assert everyone has N-1 other peers
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
while [ "$N_PEERS" != $N_1 ]; do
echo "Waiting for node $i to connect to all peers ..."
sleep 1
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
done
# - assert block height is greater than 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
while [ "$BLOCK_HEIGHT" -le 1 ]; do
echo "Waiting for node $i to commit a block ..."
sleep 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
done
echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
done
echo ""
echo "PASS"
echo ""

test/p2p/fast_sync/check_peer.sh (+43, -0)

@@ -0,0 +1,43 @@
#! /bin/bash
set -eu
set -o pipefail
ID=$1
###########################################
#
# Wait for peer to catchup to other peers
#
###########################################
addr=$(test/p2p/ip.sh $ID):46657
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657
# get another peer's height
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`
# get another peer's state
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`
echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"
# wait for it to sync to past its previous height
set +e
set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
echo "... $h2"
done
# check the app hash
root2=`curl -s $addr/status | jq .result[1].latest_app_hash`
if [[ "$root1" != "$root2" ]]; then
echo "App hash after fast sync does not match. Got $root2; expected $root1"
exit 1
fi
echo "... fast sync successful"

test/p2p/fast_sync/test.sh (+7, -36)

@@ -1,44 +1,15 @@
#! /bin/bash
set -eu
set -o pipefail
###############################################################
# for each peer:
# kill peer
# bring it back online via fast sync
# check app hash
###############################################################
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
ID=$1
cd $GOPATH/src/github.com/tendermint/tendermint
addr=$(test/p2p/ip.sh $ID):46657
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657
# get another peer's height
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`
# get another peer's state
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`
echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"
# wait for it to sync to past its previous height
set +e
set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
echo "... $h2"
# run it on each of them
for i in `seq 1 $N`; do
bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N
done
# check the app hash
root2=`curl -s $addr/status | jq .result[1].latest_app_hash`
if [[ "$root1" != "$root2" ]]; then
echo "App hash after fast sync does not match. Got $root2; expected $root1"
exit 1
fi
echo "... fast sync successful"

test/p2p/fast_sync/test_peer.sh (+37, -0)

@@ -0,0 +1,37 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
COUNT=$3
N=$4
###############################################################
# this runs on each peer:
# kill peer
# bring it back online via fast sync
# wait for it to sync and check the app hash
###############################################################
echo "Testing fasysync on node $COUNT"
# kill peer
set +e # circle sigh :(
docker rm -vf local_testnet_$COUNT
set -e
# restart peer - should have an empty blockchain
SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $COUNT $SEEDS
# wait for peer to sync and check the app hash
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$COUNT "test/p2p/fast_sync/check_peer.sh $COUNT"
echo ""
echo "PASS"
echo ""

test/p2p/local_testnet.sh (+1, -1)

@@ -3,13 +3,13 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
cd $GOPATH/src/github.com/tendermint/tendermint
# create docker network
docker network create --driver bridge --subnet 172.57.0.0/16 $NETWORK_NAME
N=4
seeds="$(test/p2p/ip.sh 1):46656"
for i in `seq 2 $N`; do
seeds="$seeds,$(test/p2p/ip.sh $i):46656"


test/p2p/test.sh (+11, -26)

@@ -3,36 +3,21 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=local_testnet
N=4
cd $GOPATH/src/github.com/tendermint/tendermint
# start the testnet on a local network
bash test/p2p/local_testnet.sh $DOCKER_IMAGE $NETWORK_NAME
# test atomic broadcast
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab test/p2p/atomic_broadcast/test.sh
# test fast sync (from current state of network)
# run it on each of them
N=4
for i in `seq 1 $N`; do
echo "Testing fasysync on node $i"
# kill peer
set +e # circle sigh :(
docker rm -vf local_testnet_$i
set -e
bash test/p2p/local_testnet.sh $DOCKER_IMAGE $NETWORK_NAME $N
# restart peer - should have an empty blockchain
SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $SEEDS
# test basic connectivity and consensus
# start client container and check the num peers and height for all nodes
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME basic "test/p2p/basic/test.sh $N"
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$i "test/p2p/fast_sync/test.sh $i"
done
echo ""
echo "PASS"
echo ""
# test atomic broadcast:
# start client container and test sending a tx to each node
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab "test/p2p/atomic_broadcast/test.sh $N"
# test fast sync (from current state of network):
# for each node, kill it and readd via fast sync
bash test/p2p/fast_sync/test.sh $DOCKER_IMAGE $NETWORK_NAME $N

test/persist/test.sh (+2, -0)

@@ -37,6 +37,7 @@ function send_txs(){
start_procs 1
send_txs
kill_procs
start_procs 2
# wait for node to handshake and make a new block
@@ -64,5 +65,6 @@ while [ "$h2" == "$h1" ]; do
done
kill_procs
sleep 2
echo "Passed Test: Persistence"

test/test_cover.sh (+1, -1)

@@ -5,7 +5,7 @@ PKGS=$(go list github.com/tendermint/tendermint/... | grep -v /vendor/)
set -e
echo "mode: atomic" > coverage.txt
for pkg in ${PKGS[@]}; do
go test -race -coverprofile=profile.out -covermode=atomic $pkg
go test -timeout 20m -race -coverprofile=profile.out -covermode=atomic $pkg
if [ -f profile.out ]; then
tail -n +2 profile.out >> coverage.txt;
rm profile.out


types/vote_set.go (+1, -1)

@@ -325,7 +325,7 @@ func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *BitArray {
defer voteSet.mtx.Unlock()
votesByBlock, ok := voteSet.votesByBlock[blockID.Key()]
if ok {
return votesByBlock.bitArray
return votesByBlock.bitArray.Copy()
}
return nil
}
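
The one-line change above is a defensive copy: callers of BitArrayByBlockID used to receive the VoteSet's internal bit array and could mutate it outside the mutex. A tiny sketch of why the copy matters (BitArray and voteSet here are minimal local stand-ins, not the real go-common and tendermint types):

package main

import "fmt"

// BitArray is a minimal local stand-in for go-common's BitArray.
type BitArray struct{ bits []bool }

func (b *BitArray) Copy() *BitArray {
	c := make([]bool, len(b.bits))
	copy(c, b.bits)
	return &BitArray{bits: c}
}

type voteSet struct{ bitArray *BitArray }

// Before the change, callers got the internal pointer and could flip bits
// behind the VoteSet's back; returning a copy keeps the internal state safe.
func (vs *voteSet) bitArrayByBlockID() *BitArray { return vs.bitArray.Copy() }

func main() {
	vs := &voteSet{bitArray: &BitArray{bits: make([]bool, 4)}}
	external := vs.bitArrayByBlockID()
	external.bits[0] = true                   // mutate the returned value...
	fmt.Println(vs.bitArray.bits[0] == false) // ...internal state is untouched: prints true
}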

