Merge pull request #343 from tendermint/restart_test

Crash/Restart tests
Ethan Buchman, 8 years ago (committed by GitHub)
commit 12d92fd5db
20 changed files with 335 additions and 127 deletions
  1. .gitignore (+2, -0)
  2. consensus/replay.go (+1, -0)
  3. consensus/state.go (+18, -0)
  4. consensus/wal.go (+5, -0)
  5. proxy/client.go (+2, -0)
  6. state/execution.go (+51, -21)
  7. state/state.go (+51, -16)
  8. test/docker/Dockerfile (+3, -0)
  9. test/p2p/client.sh (+4, -3)
  10. test/p2p/fast_sync/test.sh (+2, -1)
  11. test/p2p/fast_sync/test_peer.sh (+8, -7)
  12. test/p2p/kill_all/check_peers.sh (+48, -0)
  13. test/p2p/kill_all/test.sh (+32, -0)
  14. test/p2p/local_testnet_start.sh (+2, -1)
  15. test/p2p/local_testnet_stop.sh (+12, -0)
  16. test/p2p/peer.sh (+8, -7)
  17. test/p2p/test.sh (+12, -2)
  18. test/persist/test.sh (+2, -67)
  19. test/persist/test_failure_indices.sh (+2, -2)
  20. test/persist/test_simple.sh (+70, -0)

.gitignore (+2, -0)

@@ -10,3 +10,5 @@ rpc/test/.tendermint
remote_dump
.revision
vendor
.vagrant
test/p2p/data/

consensus/replay.go (+1, -0)

@@ -101,6 +101,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
// Search for height marker
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "height", csHeight)
return nil
} else if err != nil {
return err


consensus/state.go (+18, -0)

@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"sync"
"time"
@@ -347,6 +348,23 @@ func (cs *ConsensusState) OnStart() error {
return err
}
// If the latest block was applied in the tmsp handshake,
// we may not have written the current height to the wal,
// so check here and write it if not found.
// TODO: remove this and run the handshake/replay
// through the consensus state with a mock app
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(cs.Height))
if (err == io.EOF || !found) && cs.Step == RoundStepNewHeight {
log.Warn("Height not found in wal. Writing new height", "height", cs.Height)
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
} else if err != nil {
return err
}
if gr != nil {
gr.Close()
}
// we need the timeoutRoutine for replay so
// we don't block on the tick chan.
// NOTE: we will get a build up of garbage go routines


consensus/wal.go (+5, -0)

@@ -104,4 +104,9 @@ func (wal *WAL) Save(wmsg WALMessage) {
func (wal *WAL) writeHeight(height int) {
wal.group.WriteLine(Fmt("#HEIGHT: %v", height))
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}

proxy/client.go (+2, -0)

@@ -71,6 +71,8 @@ func DefaultClientCreator(config cfg.Config) ClientCreator {
switch addr {
case "dummy":
return NewLocalClientCreator(dummy.NewDummyApplication())
case "persistent_dummy":
return NewLocalClientCreator(dummy.NewPersistentDummyApplication(config.GetString("db_dir")))
case "nilapp":
return NewLocalClientCreator(nilapp.NewNilApplication())
default:
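
The new persistent_dummy case is what the crash/restart tests run against (test/p2p/test.sh below sets PROXY_APP=persistent_dummy). A minimal sketch of pointing a locally built node at it, outside Docker; the init/node commands and the --proxy_app flag are taken from the test scripts in this diff, while the TMROOT path is only an illustrative example:

    # assumes tendermint and the TMSP dummy apps are already installed
    export TMROOT="$HOME/.tendermint_persist_demo"   # any fresh directory works
    tendermint init
    # persistent_dummy resolves to NewPersistentDummyApplication above,
    # so the app keeps its state in db_dir across a restart
    tendermint node --proxy_app=persistent_dummy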


state/execution.go (+51, -21)

@@ -56,7 +56,9 @@ func (s *State) ExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnC
// save state with updated height/blockhash/validators
// but stale apphash, in case we fail between Commit and Save
s.Save()
s.SaveIntermediate()
fail.Fail() // XXX
return nil
}
@@ -264,7 +266,6 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
// Set the state's new AppHash
s.AppHash = res.Data
s.AppHashIsStale = false
// Update mempool.
mempool.Update(block.Height, block.Txs)
@@ -322,7 +323,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
return nil
}
log.Notice("TMSP Handshake", "height", blockInfo.BlockHeight, "app_hash", blockInfo.AppHash)
log.Notice("TMSP Handshake", "appHeight", blockInfo.BlockHeight, "appHash", blockInfo.AppHash)
blockHeight := int(blockInfo.BlockHeight) // XXX: beware overflow
appHash := blockInfo.AppHash
@@ -343,6 +344,9 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
return errors.New(Fmt("Error on replay: %v", err))
}
// Save the state
h.state.Save()
// TODO: (on restart) replay mempool
return nil
@@ -352,29 +356,45 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnConsensus proxy.AppConnConsensus) error {
storeBlockHeight := h.store.Height()
if storeBlockHeight < appBlockHeight {
stateBlockHeight := h.state.LastBlockHeight
log.Notice("TMSP Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
if storeBlockHeight == 0 {
return nil
} else if storeBlockHeight < appBlockHeight {
// if the app is ahead, there's nothing we can do
return ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
} else if storeBlockHeight == appBlockHeight {
// if we crashed between Commit and SaveState,
// the state's app hash is stale
// otherwise we're synced
if h.state.AppHashIsStale {
h.state.AppHashIsStale = false
// We ran Commit, but if we crashed before state.Save(),
// load the intermediate state and update the state.AppHash.
// NOTE: If TMSP allowed rollbacks, we could just replay the
// block even though it's been committed
stateAppHash := h.state.AppHash
lastBlockAppHash := h.store.LoadBlock(storeBlockHeight).AppHash
if bytes.Equal(stateAppHash, appHash) {
// we're all synced up
log.Debug("TMSP RelpayBlocks: Already synced")
} else if bytes.Equal(stateAppHash, lastBlockAppHash) {
// we crashed after commit and before saving state,
// so load the intermediate state and update the hash
h.state.LoadIntermediate()
h.state.AppHash = appHash
log.Debug("TMSP RelpayBlocks: Loaded intermediate state and updated state.AppHash")
} else {
PanicSanity(Fmt("Unexpected state.AppHash: state.AppHash %X; app.AppHash %X, lastBlock.AppHash %X", stateAppHash, appHash, lastBlockAppHash))
}
return nil
} else if h.state.LastBlockHeight == appBlockHeight {
// store is ahead of the app, but the core's state height is at the app's height
// this happens if we crashed after saving the block,
// but before committing it. We should be 1 ahead
if storeBlockHeight != appBlockHeight+1 {
PanicSanity(Fmt("core.state.height == app.height but store.height (%d) > app.height+1 (%d)", storeBlockHeight, appBlockHeight+1))
}
} else if storeBlockHeight == appBlockHeight+1 &&
storeBlockHeight == stateBlockHeight+1 {
// We crashed after saving the block
// but before Commit (both the state and app are behind),
// so just replay the block
// check that the blocks last apphash is the states apphash
// check that the lastBlock.AppHash matches the state apphash
block := h.store.LoadBlock(storeBlockHeight)
if !bytes.Equal(block.Header.AppHash, appHash) {
return ErrLastStateMismatch{storeBlockHeight, block.Header.AppHash, appHash}
@@ -385,13 +405,22 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnCon
h.nBlocks += 1
var eventCache types.Fireable // nil
// replay the block against the actual tendermint state
// replay the latest block
return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, MockMempool{})
} else if storeBlockHeight != stateBlockHeight {
// unless we failed before committing or saving state (previous 2 cases),
// the store and state should be at the same height!
PanicSanity(Fmt("Expected storeHeight (%d) and stateHeight (%d) to match.", storeBlockHeight, stateBlockHeight))
} else {
// either we're caught up or there's blocks to replay
// store is more than one ahead,
// so app wants to replay many blocks
// replay all blocks starting with appBlockHeight+1
var eventCache types.Fireable // nil
// TODO: use stateBlockHeight instead and let the consensus state
// do the replay
var appHash []byte
for i := appBlockHeight + 1; i <= storeBlockHeight; i++ {
h.nBlocks += 1
@@ -413,8 +442,9 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnCon
appHash = res.Data
}
if !bytes.Equal(h.state.AppHash, appHash) {
return errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay", "expected", h.state.AppHash, "got", appHash))
return errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash))
}
return nil
}
return nil
}

state/state.go (+51, -16)

@@ -14,7 +14,8 @@ import (
)
var (
stateKey = []byte("stateKey")
stateKey = []byte("stateKey")
stateIntermediateKey = []byte("stateIntermediateKey")
)
//-----------------------------------------------------------------------------
@@ -36,15 +37,17 @@ type State struct {
Validators *types.ValidatorSet
LastValidators *types.ValidatorSet // block.LastCommit validated against this
// AppHash is updated after Commit;
// it's stale after ExecBlock and before Commit
AppHashIsStale bool
AppHash []byte
// AppHash is updated after Commit
AppHash []byte
}
func LoadState(db dbm.DB) *State {
return loadState(db, stateKey)
}
func loadState(db dbm.DB, key []byte) *State {
s := &State{db: db}
buf := db.Get(stateKey)
buf := db.Get(key)
if len(buf) == 0 {
return nil
} else {
@@ -60,9 +63,6 @@ func LoadState(db dbm.DB) *State {
}
func (s *State) Copy() *State {
if s.AppHashIsStale {
PanicSanity(Fmt("App hash is stale: %v", s))
}
return &State{
db: s.db,
GenesisDoc: s.GenesisDoc,
@@ -72,7 +72,6 @@ func (s *State) Copy() *State {
LastBlockTime: s.LastBlockTime,
Validators: s.Validators.Copy(),
LastValidators: s.LastValidators.Copy(),
AppHashIsStale: false,
AppHash: s.AppHash,
}
}
@@ -83,6 +82,35 @@ func (s *State) Save() {
s.db.SetSync(stateKey, s.Bytes())
}
func (s *State) SaveIntermediate() {
s.mtx.Lock()
defer s.mtx.Unlock()
s.db.SetSync(stateIntermediateKey, s.Bytes())
}
// Load the intermediate state into the current state
// and do some sanity checks
func (s *State) LoadIntermediate() {
s2 := loadState(s.db, stateIntermediateKey)
if s.ChainID != s2.ChainID {
PanicSanity(Fmt("State mismatch for ChainID. Got %v, Expected %v", s2.ChainID, s.ChainID))
}
if s.LastBlockHeight+1 != s2.LastBlockHeight {
PanicSanity(Fmt("State mismatch for LastBlockHeight. Got %v, Expected %v", s2.LastBlockHeight, s.LastBlockHeight+1))
}
if !bytes.Equal(s.Validators.Hash(), s2.LastValidators.Hash()) {
PanicSanity(Fmt("State mismatch for LastValidators. Got %X, Expected %X", s2.LastValidators.Hash(), s.Validators.Hash()))
}
if !bytes.Equal(s.AppHash, s2.AppHash) {
PanicSanity(Fmt("State mismatch for AppHash. Got %X, Expected %X", s2.AppHash, s.AppHash))
}
s.setBlockAndValidators(s2.LastBlockHeight, s2.LastBlockID, s2.LastBlockTime, s2.Validators.Copy(), s2.LastValidators.Copy())
}
func (s *State) Equals(s2 *State) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
@@ -97,15 +125,22 @@ func (s *State) Bytes() []byte {
}
// Mutate state variables to match block and validators
// Since we don't have the new AppHash yet, we set s.AppHashIsStale=true
// after running EndBlock
func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, prevValSet, nextValSet *types.ValidatorSet) {
s.LastBlockHeight = header.Height
s.LastBlockID = types.BlockID{header.Hash(), blockPartsHeader}
s.LastBlockTime = header.Time
s.setBlockAndValidators(header.Height,
types.BlockID{header.Hash(), blockPartsHeader}, header.Time,
prevValSet, nextValSet)
}
func (s *State) setBlockAndValidators(
height int, blockID types.BlockID, blockTime time.Time,
prevValSet, nextValSet *types.ValidatorSet) {
s.LastBlockHeight = height
s.LastBlockID = blockID
s.LastBlockTime = blockTime
s.Validators = nextValSet
s.LastValidators = prevValSet
s.AppHashIsStale = true
}
func (s *State) GetValidators() (*types.ValidatorSet, *types.ValidatorSet) {


test/docker/Dockerfile (+3, -0)

@@ -21,5 +21,8 @@ COPY . $REPO
RUN go install ./cmd/tendermint
RUN bash scripts/install_tmsp_apps.sh
# expose the volume for debugging
VOLUME $REPO
EXPOSE 46656
EXPOSE 46657
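
The VOLUME line is there so a second container can inspect a node's files while it is running. A hedged sketch of one way to do that; the local_testnet_1 container name comes from test/p2p/peer.sh below and DOCKER_IMAGE from the test scripts, but this exact command is an assumption, not part of the commit:

    # attach a throwaway shell to the volumes of the first testnet node
    docker run -it --rm --volumes-from local_testnet_1 --entrypoint bash "$DOCKER_IMAGE"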

test/p2p/client.sh (+4, -3)

@@ -6,13 +6,14 @@ NETWORK_NAME=$2
ID=$3
CMD=$4
NAME=test_container_$ID
echo "starting test client container with CMD=$CMD"
# run the test container on the local network
docker run -t \
docker run -t --rm \
-v $GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p \
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh "-1") \
--name test_container_$ID \
--name $NAME \
--entrypoint bash \
$DOCKER_IMAGE $CMD

test/p2p/fast_sync/test.sh (+2, -1)

@@ -4,12 +4,13 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
PROXY_APP=$4
cd $GOPATH/src/github.com/tendermint/tendermint
# run it on each of them
for i in `seq 1 $N`; do
bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N
bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP
done

test/p2p/fast_sync/test_peer.sh (+8, -7)

@@ -3,8 +3,9 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
COUNT=$3
ID=$3
N=$4
PROXY_APP=$5
###############################################################
# this runs on each peer:
@@ -14,22 +15,22 @@ N=$4
###############################################################
echo "Testing fasysync on node $COUNT"
echo "Testing fastsync on node $ID"
# kill peer
# kill peer
set +e # circle sigh :(
docker rm -vf local_testnet_$COUNT
set -e
docker rm -vf local_testnet_$ID
set -e
# restart peer - should have an empty blockchain
SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $COUNT $SEEDS
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP $SEEDS
# wait for peer to sync and check the app hash
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$COUNT "test/p2p/fast_sync/check_peer.sh $COUNT"
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID"
echo ""
echo "PASS"


test/p2p/kill_all/check_peers.sh (+48, -0)

@@ -0,0 +1,48 @@
#! /bin/bash
set -eu
NUM_OF_PEERS=$1
# how many attempts for each peer to catch up by height
MAX_ATTEMPTS_TO_CATCH_UP=10
echo "Waiting for nodes to come online"
set +e
for i in $(seq 1 "$NUM_OF_PEERS"); do
addr=$(test/p2p/ip.sh "$i"):46657
curl -s "$addr/status" > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s "$addr/status" > /dev/null
ERR=$?
done
echo "... node $i is up"
done
set -e
# get the first peer's height
addr=$(test/p2p/ip.sh 1):46657
h1=$(curl -s "$addr/status" | jq .result[1].latest_block_height)
echo "1st peer is on height $h1"
echo "Waiting until other peers reporting a height higher than the 1st one"
for i in $(seq 2 "$NUM_OF_PEERS"); do
attempt=1
hi=0
while [[ $hi -le $h1 ]] ; do
addr=$(test/p2p/ip.sh "$i"):46657
hi=$(curl -s "$addr/status" | jq .result[1].latest_block_height)
echo "... peer $i is on height $hi"
((attempt++))
if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then
echo "$attempt unsuccessful attempts were made to catch up"
exit 1
fi
sleep 1
done
done

test/p2p/kill_all/test.sh (+32, -0)

@@ -0,0 +1,32 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
NUM_OF_PEERS=$3
NUM_OF_CRASHES=$4
cd "$GOPATH/src/github.com/tendermint/tendermint"
###############################################################
# NUM_OF_CRASHES times:
# restart all peers
# wait for them to sync and check that they are making progress
###############################################################
for i in $(seq 1 "$NUM_OF_CRASHES"); do
echo ""
echo "Restarting all peers! Take $i ..."
# restart all peers
for j in $(seq 1 "$NUM_OF_PEERS"); do
docker stop "local_testnet_$j"
docker start "local_testnet_$j"
done
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" kill_all_$i "test/p2p/kill_all/check_peers.sh $NUM_OF_PEERS"
done
echo ""
echo "PASS"
echo ""

test/p2p/local_testnet.sh → test/p2p/local_testnet_start.sh (renamed)


test/p2p/local_testnet_stop.sh (+12, -0)

@@ -0,0 +1,12 @@
#! /bin/bash
set -u
NETWORK_NAME=$1
N=$2
for i in `seq 1 $N`; do
docker stop local_testnet_$i
docker rm -vf local_testnet_$i
done
docker network rm $NETWORK_NAME

test/p2p/peer.sh (+8, -7)

@@ -4,9 +4,10 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
ID=$3
APP_PROXY=$4
set +u
SEEDS=$4
SEEDS=$5
set -u
if [[ "$SEEDS" != "" ]]; then
SEEDS=" --seeds $SEEDS "
@@ -15,9 +16,9 @@ fi
echo "starting tendermint peer ID=$ID"
# start tendermint container on the network
docker run -d \
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh $ID) \
--name local_testnet_$ID \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core \
$DOCKER_IMAGE node $SEEDS --proxy_app=dummy
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh $ID) \
--name local_testnet_$ID \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core \
$DOCKER_IMAGE node $SEEDS --proxy_app=$APP_PROXY

test/p2p/test.sh (+12, -2)

@@ -4,11 +4,18 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=local_testnet
N=4
PROXY_APP=persistent_dummy
cd $GOPATH/src/github.com/tendermint/tendermint
# stop the existing testnet and remove local network
set +e
bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N
set -e
# start the testnet on a local network
bash test/p2p/local_testnet.sh $DOCKER_IMAGE $NETWORK_NAME $N
# NOTE we re-use the same network for all tests
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
# test basic connectivity and consensus
# start client container and check the num peers and height for all nodes
@@ -20,4 +27,7 @@ bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab "test/p2p/atomic_broadcas
# test fast sync (from current state of network):
# for each node, kill it and readd via fast sync
bash test/p2p/fast_sync/test.sh $DOCKER_IMAGE $NETWORK_NAME $N
bash test/p2p/fast_sync/test.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
# test killing all peers
bash test/p2p/kill_all/test.sh $DOCKER_IMAGE $NETWORK_NAME $N 3
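
Since the script now tears down any previous testnet before starting, the whole p2p suite (including the new kill_all stage) can be re-run against an already-built image with a single command; a minimal sketch, with DOCKER_IMAGE being whatever tag the test image was built under:

    bash test/p2p/test.sh "$DOCKER_IMAGE"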

test/persist/test.sh (+2, -67)

@@ -1,70 +1,5 @@
#! /bin/bash
cd $GOPATH/src/github.com/tendermint/tendermint
export TMROOT=$HOME/.tendermint_persist
rm -rf $TMROOT
tendermint init
function start_procs(){
name=$1
echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
PID_DUMMY=$!
tendermint node &> tendermint_${name}.log &
PID_TENDERMINT=$!
sleep 5
}
function kill_procs(){
kill -9 $PID_DUMMY $PID_TENDERMINT
}
function send_txs(){
# send a bunch of txs over a few blocks
echo "Sending txs"
for i in `seq 1 5`; do
for j in `seq 1 100`; do
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'`
curl -s 127.0.0.1:46657/broadcast_tx_async?tx=\"$tx\" &> /dev/null
done
sleep 1
done
}
start_procs 1
send_txs
kill_procs
start_procs 2
# wait for node to handshake and make a new block
addr="localhost:46657"
curl -s $addr/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
i=$(($i + 1))
if [[ $i == 10 ]]; then
echo "Timed out waiting for tendermint to start"
exit 1
fi
done
# wait for a new block
h1=`curl -s $addr/status | jq .result[1].latest_block_height`
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
done
kill_procs
sleep 2
echo "Passed Test: Persistence"
bash ./test/persist/test_failure_indices.sh

test/persist/test2.sh → test/persist/test_failure_indices.sh (renamed)


test/persist/test_simple.sh (+70, -0)

@@ -0,0 +1,70 @@
#! /bin/bash
export TMROOT=$HOME/.tendermint_persist
rm -rf $TMROOT
tendermint init
function start_procs(){
name=$1
echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
PID_DUMMY=$!
tendermint node &> tendermint_${name}.log &
PID_TENDERMINT=$!
sleep 5
}
function kill_procs(){
kill -9 $PID_DUMMY $PID_TENDERMINT
}
function send_txs(){
# send a bunch of txs over a few blocks
echo "Sending txs"
for i in `seq 1 5`; do
for j in `seq 1 100`; do
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'`
curl -s 127.0.0.1:46657/broadcast_tx_async?tx=\"$tx\" &> /dev/null
done
sleep 1
done
}
start_procs 1
send_txs
kill_procs
start_procs 2
# wait for node to handshake and make a new block
addr="localhost:46657"
curl -s $addr/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
i=$(($i + 1))
if [[ $i == 10 ]]; then
echo "Timed out waiting for tendermint to start"
exit 1
fi
done
# wait for a new block
h1=`curl -s $addr/status | jq .result[1].latest_block_height`
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
done
kill_procs
sleep 2
echo "Passed Test: Persistence"
