Release 0.8.0 (pull/311/merge, tag v0.8.0)
@@ -1,25 +1,33 @@
# -*- mode: ruby -*- | |||
# vi: set ft=ruby : | |||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! | |||
VAGRANTFILE_API_VERSION = "2" | |||
Vagrant.configure("2") do |config| | |||
config.vm.box = "ubuntu/trusty64" | |||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| | |||
config.vm.box = "phusion-open-ubuntu-14.04-amd64" | |||
config.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vbox.box" | |||
# Or, for Ubuntu 12.04: | |||
config.vm.provider :vmware_fusion do |f, override| | |||
override.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vmwarefusion.box" | |||
config.vm.provider "virtualbox" do |v| | |||
v.memory = 2048 | |||
v.cpus = 2 | |||
end | |||
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty? | |||
# Install Docker | |||
pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \ | |||
"echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \ | |||
"apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; " | |||
# Add vagrant user to the docker group | |||
pkg_cmd << "usermod -a -G docker vagrant; " | |||
config.vm.provision :shell, :inline => pkg_cmd | |||
end | |||
config.vm.provision "shell", inline: <<-SHELL | |||
apt-get update | |||
apt-get install -y --no-install-recommends wget curl jq shellcheck bsdmainutils psmisc | |||
wget -qO- https://get.docker.com/ | sh | |||
usermod -a -G docker vagrant | |||
curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
tar -xvf go1.6.linux-amd64.tar.gz
mv go /usr/local | |||
echo 'export PATH=$PATH:/usr/local/go/bin' >> /home/vagrant/.profile | |||
mkdir -p /home/vagrant/go/bin | |||
chown -R vagrant:vagrant /home/vagrant/go | |||
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.profile | |||
mkdir -p /home/vagrant/go/src/github.com/tendermint | |||
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint | |||
su - vagrant -c 'curl https://glide.sh/get | sh' | |||
su - vagrant -c 'cd /vagrant/ && glide install && make test' | |||
SHELL | |||
end |
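# Usage sketch (not part of the original file): with Vagrant and VirtualBox
# installed, one plausible way to bring the VM up and run the test suite,
# mirroring the provisioning above, is:
#
#   vagrant up
#   vagrant ssh -c 'cd ~/go/src/github.com/tendermint/tendermint && make test'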
@@ -0,0 +1,297 @@
package consensus | |||
import ( | |||
"sync" | |||
"testing" | |||
"time" | |||
"github.com/tendermint/tendermint/config/tendermint_test" | |||
. "github.com/tendermint/go-common" | |||
cfg "github.com/tendermint/go-config" | |||
"github.com/tendermint/go-events" | |||
"github.com/tendermint/go-p2p" | |||
"github.com/tendermint/tendermint/types" | |||
) | |||
func init() { | |||
config = tendermint_test.ResetConfig("consensus_byzantine_test") | |||
} | |||
//---------------------------------------------- | |||
// byzantine failures | |||
// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). | |||
// byzantine validator sends conflicting proposals into A and B, | |||
// and prevotes/precommits on both of them. | |||
// B sees a commit, A doesn't. | |||
// Byzantine validator refuses to prevote. | |||
// Heal partition and ensure A sees the commit | |||
func TestByzantine(t *testing.T) { | |||
N := 4 | |||
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter) | |||
// give the byzantine validator a normal ticker | |||
css[0].SetTimeoutTicker(NewTimeoutTicker()) | |||
switches := make([]*p2p.Switch, N) | |||
for i := 0; i < N; i++ { | |||
switches[i] = p2p.NewSwitch(cfg.NewMapConfig(nil)) | |||
} | |||
reactors := make([]p2p.Reactor, N) | |||
defer func() { | |||
for _, r := range reactors { | |||
if rr, ok := r.(*ByzantineReactor); ok { | |||
rr.reactor.Switch.Stop() | |||
} else { | |||
r.(*ConsensusReactor).Switch.Stop() | |||
} | |||
} | |||
}() | |||
eventChans := make([]chan interface{}, N) | |||
for i := 0; i < N; i++ { | |||
if i == 0 { | |||
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator)) | |||
// make byzantine | |||
css[i].decideProposal = func(j int) func(int, int) { | |||
return func(height, round int) { | |||
byzantineDecideProposalFunc(height, round, css[j], switches[j]) | |||
} | |||
}(i) | |||
css[i].doPrevote = func(height, round int) {} | |||
} | |||
eventSwitch := events.NewEventSwitch() | |||
_, err := eventSwitch.Start() | |||
if err != nil { | |||
t.Fatalf("Failed to start switch: %v", err) | |||
} | |||
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1) | |||
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
conR.SetEventSwitch(eventSwitch) | |||
var conRI p2p.Reactor | |||
conRI = conR | |||
if i == 0 { | |||
conRI = NewByzantineReactor(conR) | |||
} | |||
reactors[i] = conRI | |||
} | |||
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch { | |||
// ignore new switch s, we already made ours | |||
switches[i].AddReactor("CONSENSUS", reactors[i]) | |||
return switches[i] | |||
}, func(sws []*p2p.Switch, i, j int) { | |||
// the network starts partitioned with a globally active adversary
if i != 0 { | |||
return | |||
} | |||
p2p.Connect2Switches(sws, i, j) | |||
}) | |||
// start the state machines | |||
byzR := reactors[0].(*ByzantineReactor) | |||
s := byzR.reactor.conS.GetState() | |||
byzR.reactor.SwitchToConsensus(s) | |||
for i := 1; i < N; i++ { | |||
cr := reactors[i].(*ConsensusReactor) | |||
cr.SwitchToConsensus(cr.conS.GetState()) | |||
} | |||
// byz proposer sends one block to peers[0] | |||
// and the other block to peers[1] and peers[2]. | |||
// note that the peer order and switch order don't match.
peers := switches[0].Peers().List() | |||
ind0 := getSwitchIndex(switches, peers[0]) | |||
ind1 := getSwitchIndex(switches, peers[1]) | |||
ind2 := getSwitchIndex(switches, peers[2]) | |||
// connect the 2 peers in the larger partition | |||
p2p.Connect2Switches(switches, ind1, ind2) | |||
// wait for someone in the big partition to make a block | |||
select { | |||
case <-eventChans[ind2]: | |||
} | |||
log.Notice("A block has been committed. Healing partition") | |||
// connect the partitions | |||
p2p.Connect2Switches(switches, ind0, ind1) | |||
p2p.Connect2Switches(switches, ind0, ind2) | |||
// wait till everyone makes the first new block | |||
// (one of them already has) | |||
wg := new(sync.WaitGroup) | |||
wg.Add(2) | |||
for i := 1; i < N-1; i++ { | |||
go func(j int) { | |||
<-eventChans[j] | |||
wg.Done() | |||
}(i) | |||
} | |||
done := make(chan struct{}) | |||
go func() { | |||
wg.Wait() | |||
close(done) | |||
}() | |||
tick := time.NewTicker(time.Second * 10) | |||
select { | |||
case <-done: | |||
case <-tick.C: | |||
for i, reactor := range reactors { | |||
t.Log(Fmt("Consensus Reactor %v", i)) | |||
t.Log(Fmt("%v", reactor)) | |||
} | |||
t.Fatalf("Timed out waiting for all validators to commit first block") | |||
} | |||
} | |||
//------------------------------- | |||
// byzantine consensus functions | |||
func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.Switch) { | |||
// byzantine user should create two proposals and try to split the vote. | |||
// Avoid sending on internalMsgQueue and running consensus state. | |||
// Create a new proposal block from state/txs from the mempool. | |||
block1, blockParts1 := cs.createProposalBlock() | |||
polRound, polBlockID := cs.Votes.POLInfo() | |||
proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) | |||
cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesn't err
// Create a new proposal block from state/txs from the mempool. | |||
block2, blockParts2 := cs.createProposalBlock() | |||
polRound, polBlockID = cs.Votes.POLInfo() | |||
proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) | |||
cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesn't err
block1Hash := block1.Hash() | |||
block2Hash := block2.Hash() | |||
// broadcast conflicting proposals/block parts to peers | |||
peers := sw.Peers().List() | |||
log.Notice("Byzantine: broadcasting conflicting proposals", "peers", len(peers)) | |||
for i, peer := range peers { | |||
if i < len(peers)/2 { | |||
go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) | |||
} else { | |||
go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) | |||
} | |||
} | |||
} | |||
func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { | |||
// proposal | |||
msg := &ProposalMessage{Proposal: proposal} | |||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) | |||
// parts | |||
for i := 0; i < parts.Total(); i++ { | |||
part := parts.GetPart(i) | |||
msg := &BlockPartMessage{ | |||
Height: height, // This tells peer that this part applies to us. | |||
Round: round, // This tells peer that this part applies to us. | |||
Part: part, | |||
} | |||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) | |||
} | |||
// votes | |||
cs.mtx.Lock() | |||
prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header()) | |||
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) | |||
cs.mtx.Unlock() | |||
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}}) | |||
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}}) | |||
} | |||
//---------------------------------------- | |||
// byzantine consensus reactor | |||
type ByzantineReactor struct { | |||
Service | |||
reactor *ConsensusReactor | |||
} | |||
func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor { | |||
return &ByzantineReactor{ | |||
Service: conR, | |||
reactor: conR, | |||
} | |||
} | |||
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } | |||
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } | |||
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) { | |||
if !br.reactor.IsRunning() { | |||
return | |||
} | |||
// Create peerState for peer | |||
peerState := NewPeerState(peer) | |||
peer.Data.Set(types.PeerStateKey, peerState) | |||
// Send our state to peer. | |||
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). | |||
if !br.reactor.fastSync { | |||
br.reactor.sendNewRoundStepMessage(peer) | |||
} | |||
} | |||
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) { | |||
br.reactor.RemovePeer(peer, reason) | |||
} | |||
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) { | |||
br.reactor.Receive(chID, peer, msgBytes) | |||
} | |||
//---------------------------------------- | |||
// byzantine privValidator | |||
type ByzantinePrivValidator struct { | |||
Address []byte `json:"address"` | |||
types.Signer `json:"-"` | |||
mtx sync.Mutex | |||
} | |||
// Return a priv validator that will sign anything | |||
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator { | |||
return &ByzantinePrivValidator{ | |||
Address: pv.Address, | |||
Signer: pv.Signer, | |||
} | |||
} | |||
func (privVal *ByzantinePrivValidator) GetAddress() []byte { | |||
return privVal.Address | |||
} | |||
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error { | |||
privVal.mtx.Lock() | |||
defer privVal.mtx.Unlock() | |||
// Sign | |||
vote.Signature = privVal.Sign(types.SignBytes(chainID, vote)) | |||
return nil | |||
} | |||
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error { | |||
privVal.mtx.Lock() | |||
defer privVal.mtx.Unlock() | |||
// Sign | |||
proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal)) | |||
return nil | |||
} | |||
func (privVal *ByzantinePrivValidator) String() string { | |||
return Fmt("PrivValidator{%X}", privVal.Address) | |||
} |
@@ -0,0 +1,309 @@
package consensus | |||
import ( | |||
"fmt" | |||
"sync" | |||
"testing" | |||
"time" | |||
"github.com/tendermint/tendermint/config/tendermint_test" | |||
"github.com/tendermint/go-events" | |||
"github.com/tendermint/go-p2p" | |||
"github.com/tendermint/tendermint/types" | |||
"github.com/tendermint/abci/example/dummy" | |||
) | |||
func init() { | |||
config = tendermint_test.ResetConfig("consensus_reactor_test") | |||
} | |||
//---------------------------------------------- | |||
// in-process testnets | |||
func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) { | |||
reactors := make([]*ConsensusReactor, N) | |||
eventChans := make([]chan interface{}, N) | |||
for i := 0; i < N; i++ { | |||
reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
eventSwitch := events.NewEventSwitch() | |||
_, err := eventSwitch.Start() | |||
if err != nil { | |||
t.Fatalf("Failed to start switch: %v", err) | |||
} | |||
reactors[i].SetEventSwitch(eventSwitch) | |||
if subscribeEventRespond { | |||
eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock()) | |||
} else { | |||
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1) | |||
} | |||
} | |||
// make connected switches and start all reactors | |||
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch { | |||
s.AddReactor("CONSENSUS", reactors[i]) | |||
return s | |||
}, p2p.Connect2Switches) | |||
// now that everyone is connected, start the state machines | |||
// If we started the state machines before everyone was connected, | |||
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors | |||
for i := 0; i < N; i++ { | |||
s := reactors[i].conS.GetState() | |||
reactors[i].SwitchToConsensus(s) | |||
} | |||
return reactors, eventChans | |||
} | |||
func stopConsensusNet(reactors []*ConsensusReactor) { | |||
for _, r := range reactors { | |||
r.Switch.Stop() | |||
} | |||
} | |||
// Ensure a testnet makes blocks | |||
func TestReactor(t *testing.T) { | |||
N := 4 | |||
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) | |||
reactors, eventChans := startConsensusNet(t, css, N, false) | |||
defer stopConsensusNet(reactors) | |||
// wait till everyone makes the first new block | |||
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) { | |||
<-eventChans[j] | |||
wg.Done() | |||
}, css) | |||
} | |||
//------------------------------------------------------------- | |||
// ensure we can make blocks despite cycling a validator set | |||
func TestVotingPowerChange(t *testing.T) { | |||
nVals := 4 | |||
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy) | |||
reactors, eventChans := startConsensusNet(t, css, nVals, true) | |||
defer stopConsensusNet(reactors) | |||
// map of active validators | |||
activeVals := make(map[string]struct{}) | |||
for i := 0; i < nVals; i++ { | |||
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} | |||
} | |||
// wait till everyone makes block 1 | |||
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) { | |||
<-eventChans[j] | |||
eventChans[j] <- struct{}{} | |||
wg.Done() | |||
}, css) | |||
//--------------------------------------------------------------------------- | |||
log.Info("---------------------------- Testing changing the voting power of one validator a few times") | |||
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey | |||
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25) | |||
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { | |||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) | |||
} | |||
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 2) | |||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { | |||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) | |||
} | |||
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 100) | |||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) | |||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { | |||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) | |||
} | |||
} | |||
func TestValidatorSetChanges(t *testing.T) { | |||
nPeers := 7 | |||
nVals := 4 | |||
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy) | |||
reactors, eventChans := startConsensusNet(t, css, nPeers, true) | |||
defer stopConsensusNet(reactors) | |||
// map of active validators | |||
activeVals := make(map[string]struct{}) | |||
for i := 0; i < nVals; i++ { | |||
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} | |||
} | |||
// wait till everyone makes block 1 | |||
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) { | |||
<-eventChans[j] | |||
eventChans[j] <- struct{}{} | |||
wg.Done() | |||
}, css) | |||
//--------------------------------------------------------------------------- | |||
log.Info("---------------------------- Testing adding one validator") | |||
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey | |||
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower)) | |||
// wait till everyone makes block 2 | |||
// ensure the commit includes all validators | |||
// send newValTx to change vals in block 3 | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1) | |||
// wait till everyone makes block 3. | |||
// it includes the commit for block 2, which is by the original validator set | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
// wait till everyone makes block 4. | |||
// it includes the commit for block 3, which is by the original validator set | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
// the commits for block 4 should be with the updated validator set | |||
activeVals[string(newValidatorPubKey1.Address())] = struct{}{} | |||
// wait till everyone makes block 5 | |||
// it includes the commit for block 4, which should have the updated validator set | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
//--------------------------------------------------------------------------- | |||
log.Info("---------------------------- Testing changing the voting power of one validator") | |||
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey | |||
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25) | |||
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { | |||
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower()) | |||
} | |||
//--------------------------------------------------------------------------- | |||
log.Info("---------------------------- Testing adding two validators at once") | |||
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey | |||
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower)) | |||
newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey | |||
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower)) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
activeVals[string(newValidatorPubKey2.Address())] = struct{}{} | |||
activeVals[string(newValidatorPubKey3.Address())] = struct{}{} | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
//--------------------------------------------------------------------------- | |||
log.Info("---------------------------- Testing removing two validators at once") | |||
removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0) | |||
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
delete(activeVals, string(newValidatorPubKey2.Address())) | |||
delete(activeVals, string(newValidatorPubKey3.Address())) | |||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) | |||
} | |||
// Check we can make blocks with skip_timeout_commit=false | |||
func TestReactorWithTimeoutCommit(t *testing.T) { | |||
N := 4 | |||
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter) | |||
// override default SkipTimeoutCommit == true for tests | |||
for i := 0; i < N; i++ { | |||
css[i].timeoutParams.SkipTimeoutCommit = false | |||
} | |||
reactors, eventChans := startConsensusNet(t, css, N-1, false) | |||
defer stopConsensusNet(reactors) | |||
// wait till everyone makes the first new block | |||
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) { | |||
<-eventChans[j] | |||
wg.Done() | |||
}, css) | |||
} | |||
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { | |||
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) { | |||
newBlockI := <-eventChans[j] | |||
newBlock := newBlockI.(types.EventDataNewBlock).Block | |||
log.Warn("Got block", "height", newBlock.Height, "validator", j) | |||
err := validateBlock(newBlock, activeVals) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
for _, tx := range txs { | |||
css[j].mempool.CheckTx(tx, nil) | |||
} | |||
eventChans[j] <- struct{}{} | |||
wg.Done() | |||
log.Warn("Done wait group", "height", newBlock.Height, "validator", j) | |||
}, css) | |||
} | |||
// expects high synchrony! | |||
func validateBlock(block *types.Block, activeVals map[string]struct{}) error { | |||
if block.LastCommit.Size() != len(activeVals) { | |||
return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals)) | |||
} | |||
for _, vote := range block.LastCommit.Precommits { | |||
if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok { | |||
return fmt.Errorf("Found vote for unactive validator %X", vote.ValidatorAddress) | |||
} | |||
} | |||
return nil | |||
} | |||
func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*ConsensusState) { | |||
wg := new(sync.WaitGroup) | |||
wg.Add(n) | |||
for i := 0; i < n; i++ { | |||
go f(wg, i) | |||
} | |||
done := make(chan struct{}) | |||
go func() { | |||
wg.Wait() | |||
close(done) | |||
}() | |||
select { | |||
case <-done: | |||
case <-time.After(time.Second * 10): | |||
for i, cs := range css { | |||
fmt.Println("#################") | |||
fmt.Println("Validator", i) | |||
fmt.Println(cs.GetRoundState()) | |||
fmt.Println("") | |||
} | |||
panic("Timed out waiting for all validators to commit a block") | |||
} | |||
} |
@@ -0,0 +1,58 @@
#! /bin/bash | |||
cd $GOPATH/src/github.com/tendermint/tendermint | |||
# specify a dir to copy | |||
# NOTE: eventually we should replace with `tendermint init --test` | |||
DIR=$HOME/.tendermint_test/consensus_state_test | |||
# XXX: remove tendermint dir | |||
rm -rf $HOME/.tendermint | |||
cp -r $DIR $HOME/.tendermint | |||
function reset(){ | |||
rm -rf $HOME/.tendermint/data | |||
tendermint unsafe_reset_priv_validator | |||
} | |||
reset | |||
# empty block | |||
tendermint node --proxy_app=dummy &> /dev/null & | |||
sleep 5 | |||
killall tendermint | |||
# /q would print up to and including the match, then quit. | |||
# /Q doesn't include the match. | |||
# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match | |||
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal | |||
reset | |||
# small block 1 | |||
bash scripts/txs/random.sh 1000 36657 &> /dev/null & | |||
PID=$! | |||
tendermint node --proxy_app=dummy &> /dev/null & | |||
sleep 5 | |||
killall tendermint | |||
kill -9 $PID | |||
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal | |||
reset | |||
# small block 2 (part size = 512) | |||
echo "" >> ~/.tendermint/config.toml | |||
echo "block_part_size = 512" >> ~/.tendermint/config.toml | |||
bash scripts/txs/random.sh 1000 36657 &> /dev/null & | |||
PID=$! | |||
tendermint node --proxy_app=dummy &> /dev/null & | |||
sleep 5 | |||
killall tendermint | |||
kill -9 $PID | |||
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal | |||
reset | |||
@@ -1,8 +1,10 @@
{"time":"2016-04-03T11:23:54.387Z","msg":[3,{"duration":972835254,"height":1,"round":0,"step":1}]} | |||
{"time":"2016-04-03T11:23:54.388Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} | |||
{"time":"2016-04-03T11:23:54.388Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"pol_round":-1,"signature":"3A2ECD5023B21EC144EC16CFF1B992A4321317B83EEDD8969FDFEA6EB7BF4389F38DDA3E7BB109D63A07491C16277A197B241CF1F05F5E485C59882ECACD9E07"}}],"peer_key":""}]} | |||
{"time":"2016-04-03T11:23:54.389Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011441D59F4B718AC00000000000000114C4B01D3810579550997AC5641E759E20D99B51C10001000100","proof":{"aunts":[]}}}],"peer_key":""}]} | |||
{"time":"2016-04-03T11:23:54.390Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]} | |||
{"time":"2016-04-03T11:23:54.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"47D2A75A4E2F15DB1F0D1B656AC0637AF9AADDFEB6A156874F6553C73895E5D5DC948DBAEF15E61276C5342D0E638DFCB77C971CD282096EA8735A564A90F008"}}],"peer_key":""}]} | |||
{"time":"2016-04-03T11:23:54.392Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]} | |||
{"time":"2016-04-03T11:23:54.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"39147DA595F08B73CF8C899967C8403B5872FD9042FFA4E239159E0B6C5D9665C9CA81D766EACA2AE658872F94C2FCD1E34BF51859CD5B274DA8512BACE4B50D"}}],"peer_key":""}]} | |||
#HEIGHT: 1 | |||
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]} | |||
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} | |||
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:33.506Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114914148D83E0DC00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:33.508Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]} | |||
{"time":"2016-12-18T05:05:33.508Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"B64D0BB64B2E9AAFDD4EBEA679644F77AE774D69E3E2E1B042AB15FE4F84B1427AC6C8A25AFF58EA22011AE567FEA49D2EE7354382E915AD85BF40C58FA6130C"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]} | |||
{"time":"2016-12-18T05:05:33.509Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"D83E968392D1BF09821E0D05079DAB5491CABD89BE128BD1CF573ED87148BA84667A56C0A069EFC90760F25EDAC62BC324DBB12EA63F44E6CB2D3500FE5E640F"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]} |
@@ -1,10 +1,14 @@
{"time":"2016-10-11T16:21:23.438Z","msg":[3,{"duration":0,"height":1,"round":0,"step":1}]} | |||
{"time":"2016-10-11T16:21:23.440Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} | |||
{"time":"2016-10-11T16:21:23.440Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"pol_round":-1,"signature":"8F74F7032E50DFBC17E8B42DD15FD54858B45EEB1B8DAF6432AFBBB1333AC1E850290DE82DF613A10430EB723023527498D45C106FD2946FEF03A9C8B301020B"}}],"peer_key":""}]} | |||
{"time":"2016-10-11T16:21:23.440Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F746573740101147C86B383BAB78001A60000000001148A3835062BB5E79BE490FAB65168D69BD716AD530114C4B01D3810579550997AC5641E759E20D99B51C1000101A6010F616263643139363D64636261313936010F616263643139373D64636261313937010F616263643139383D64636261313938010F616263643139393D64636261313939010F616263643230303D64636261323030010F616263643230313D64636261323031010F616263643230323D64636261323032010F616263643230333D64636261323033010F616263643230343D64636261323034010F616263643230353D64636261323035010F616263643230363D64636261323036010F616263643230373D64636261323037010F616263643230383D64636261323038010F616263643230393D64636261323039010F616263643231303D64636261323130010F616263643231313D64636261323131010F616263643231323D64636261323132010F616263643231333D64636261323133010F616263643231343D64636261323134010F616263643231353D64636261323135010F616263643231363D64636261323136010F616263643231373D64636261323137010F616263643231383D64636261323138010F616263643231393D64636261323139010F616263643232303D64636261323230010F616263643232313D64636261323231010F616263643232323D64636261323232010F616263643232333D64636261323233010F616263643232343D64636261323234010F616263643232353D64636261323235010F616263643232363D64636261323236010F616263643232373D64636261323237010F616263643232383D64636261323238010F616263643232393D64636261323239010F616263643233303D64636261323330010F616263643233313D64636261323331010F616263643233323D64636261323332010F616263643233333D64636261323333010F616263643233343D64636261323334010F616263643233353D64636261323335010F616263643233363D64636261323336010F616263643233373D64636261323337010F616263643233383D64636261323338010F616263643233393D64636261323339010F616263643234303D64636261323430010F616263643234313D64636261323431010F616263643234323D64636261323432010F616263643234333D64636261323433010F616263643234343D64636261323434010F616263643234353D64636261323435010F616263643234363D64636261323436010F616263643234373D64636261323437010F616263643234383D64636261323438010F616263643234393D64636261323439010F616263643235303D64636261323530010F61626364","proof":{"aunts":["22516491F7E1B5ADD8F12B309E9E8F6F04C034AB","C65A9589F377F2B6CF44B9BAFEBB535DF3C3A4FB"]}}}],"peer_key":""}]} | |||
{"time":"2016-10-11T16:21:23.441Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"3235313D64636261323531010F616263643235323D64636261323532010F616263643235333D64636261323533010F616263643235343D64636261323534010F616263643235353D64636261323535010F616263643235363D64636261323536010F616263643235373D64636261323537010F616263643235383D64636261323538010F616263643235393D64636261323539010F616263643236303D64636261323630010F616263643236313D64636261323631010F616263643236323D64636261323632010F616263643236333D64636261323633010F616263643236343D64636261323634010F616263643236353D64636261323635010F616263643236363D64636261323636010F616263643236373D64636261323637010F616263643236383D64636261323638010F616263643236393D64636261323639010F616263643237303D64636261323730010F616263643237313D64636261323731010F616263643237323D64636261323732010F616263643237333D64636261323733010F616263643237343D64636261323734010F616263643237353D64636261323735010F616263643237363D64636261323736010F616263643237373D64636261323737010F616263643237383D64636261323738010F616263643237393D64636261323739010F616263643238303D64636261323830010F616263643238313D64636261323831010F616263643238323D64636261323832010F616263643238333D64636261323833010F616263643238343D64636261323834010F616263643238353D64636261323835010F616263643238363D64636261323836010F616263643238373D64636261323837010F616263643238383D64636261323838010F616263643238393D64636261323839010F616263643239303D64636261323930010F616263643239313D64636261323931010F616263643239323D64636261323932010F616263643239333D64636261323933010F616263643239343D64636261323934010F616263643239353D64636261323935010F616263643239363D64636261323936010F616263643239373D64636261323937010F616263643239383D64636261323938010F616263643239393D64636261323939010F616263643330303D64636261333030010F616263643330313D64636261333031010F616263643330323D64636261333032010F616263643330333D64636261333033010F616263643330343D64636261333034010F616263643330353D64636261333035010F616263643330363D64636261333036010F616263643330373D64636261333037010F616263643330383D64636261333038010F616263643330393D64636261333039010F616263643331303D64636261333130010F616263643331313D","proof":{"aunts":["F730990451BAB63C3CF6AC8E6ED4F52259CA5F53","C65A9589F377F2B6CF44B9BAFEBB535DF3C3A4FB"]}}}],"peer_key":""}]} | |||
{"time":"2016-10-11T16:21:23.441Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"64636261333131010F616263643331323D64636261333132010F616263643331333D64636261333133010F616263643331343D64636261333134010F616263643331353D64636261333135010F616263643331363D64636261333136010F616263643331373D64636261333137010F616263643331383D64636261333138010F616263643331393D64636261333139010F616263643332303D64636261333230010F616263643332313D64636261333231010F616263643332323D64636261333232010F616263643332333D64636261333233010F616263643332343D64636261333234010F616263643332353D64636261333235010F616263643332363D64636261333236010F616263643332373D64636261333237010F616263643332383D64636261333238010F616263643332393D64636261333239010F616263643333303D64636261333330010F616263643333313D64636261333331010F616263643333323D64636261333332010F616263643333333D64636261333333010F616263643333343D64636261333334010F616263643333353D64636261333335010F616263643333363D64636261333336010F616263643333373D64636261333337010F616263643333383D64636261333338010F616263643333393D64636261333339010F616263643334303D64636261333430010F616263643334313D64636261333431010F616263643334323D64636261333432010F616263643334333D64636261333433010F616263643334343D64636261333434010F616263643334353D64636261333435010F616263643334363D64636261333436010F616263643334373D64636261333437010F616263643334383D64636261333438010F616263643334393D64636261333439010F616263643335303D64636261333530010F616263643335313D64636261333531010F616263643335323D64636261333532010F616263643335333D64636261333533010F616263643335343D64636261333534010F616263643335353D64636261333535010F616263643335363D64636261333536010F616263643335373D64636261333537010F616263643335383D64636261333538010F616263643335393D64636261333539010F616263643336303D64636261333630010F616263643336313D646362613336310100","proof":{"aunts":["56EF782EE04E0359D0B38271FD22B312A546FC3A"]}}}],"peer_key":""}]} | |||
{"time":"2016-10-11T16:21:23.447Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]} | |||
{"time":"2016-10-11T16:21:23.447Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"AAE0ECF64D818A61F6E3D6D11E60F343C3FC8800","block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"signature":"0870A9C3FF59DE0F5574B77F030BD160C1E2966AECE815E7C97CFA8BC4A6B01D7A10D91416B1AA02D49EFF7F08A239048CD9CD93E7AE4F80871FBFFF7DBFC50C"}}],"peer_key":""}]} | |||
{"time":"2016-10-11T16:21:23.448Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]} | |||
{"time":"2016-10-11T16:21:23.448Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"AAE0ECF64D818A61F6E3D6D11E60F343C3FC8800","block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"signature":"0CEEA8A987D88D0A0870C0076DB8D1B57D3B051D017745B46C4710BBE6DF0F9AE8D5A95B49E4158A1A8C8C6475B8A8E91275303B9C10A5C0C18F40EBB0DA0905"}}],"peer_key":""}]} | |||
#HEIGHT: 1 | |||
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]} | |||
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} | |||
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011491414B3483A8400190000000000114926EA77D30A4D19866159DE7E58AA9461F90F9D10114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010190010D6162636431323D646362613132010D6162636431333D646362613133010D6162636431343D646362613134010D6162636431353D646362613135010D6162636431363D646362613136010D6162636431373D646362613137010D6162636431383D646362613138010D6162636431393D646362613139010D6162636432303D646362613230010D6162636432313D646362613231010D6162636432323D646362613232010D6162636432333D646362613233010D6162636432343D646362613234010D6162636432353D646362613235010D6162636432363D646362613236010D6162636432373D646362613237010D6162636432383D646362613238010D6162636432393D646362613239010D6162636433303D646362613330010D6162636433313D646362613331010D6162636433323D646362613332010D6162636433333D646362613333010D6162636433343D646362613334010D6162636433353D646362613335010D6162636433363D646362613336010D6162636433373D646362613337010D6162636433383D646362613338010D6162636433393D646362613339010D6162636434303D","proof":{"aunts":["C9FBD66B63A976638196323F5B93494BDDFC9EED","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"646362613430010D6162636434313D646362613431010D6162636434323D646362613432010D6162636434333D646362613433010D6162636434343D646362613434010D6162636434353D646362613435010D6162636434363D646362613436010D6162636434373D646362613437010D6162636434383D646362613438010D6162636434393D646362613439010D6162636435303D646362613530010D6162636435313D646362613531010D6162636435323D646362613532010D6162636435333D646362613533010D6162636435343D646362613534010D6162636435353D646362613535010D6162636435363D646362613536010D6162636435373D646362613537010D6162636435383D646362613538010D6162636435393D646362613539010D6162636436303D646362613630010D6162636436313D646362613631010D6162636436323D646362613632010D6162636436333D646362613633010D6162636436343D646362613634010D6162636436353D646362613635010D6162636436363D646362613636010D6162636436373D646362613637010D6162636436383D646362613638010D6162636436393D646362613639010D6162636437303D646362613730010D6162636437313D646362613731010D6162636437323D646362613732010D6162636437333D646362613733010D6162636437343D6463","proof":{"aunts":["D7FB03B935B77C322064F8277823CDB5C7018597","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"62613734010D6162636437353D646362613735010D6162636437363D646362613736010D6162636437373D646362613737010D6162636437383D646362613738010D6162636437393D646362613739010D6162636438303D646362613830010D6162636438313D646362613831010D6162636438323D646362613832010D6162636438333D646362613833010D6162636438343D646362613834010D6162636438353D646362613835010D6162636438363D646362613836010D6162636438373D646362613837010D6162636438383D646362613838010D6162636438393D646362613839010D6162636439303D646362613930010D6162636439313D646362613931010D6162636439323D646362613932010D6162636439333D646362613933010D6162636439343D646362613934010D6162636439353D646362613935010D6162636439363D646362613936010D6162636439373D646362613937010D6162636439383D646362613938010D6162636439393D646362613939010F616263643130303D64636261313030010F616263643130313D64636261313031010F616263643130323D64636261313032010F616263643130333D64636261313033010F616263643130343D64636261313034010F616263643130353D64636261313035010F616263643130363D64636261313036010F616263643130373D64636261","proof":{"aunts":["A607D9BF5107E6C9FD19B6928D9CC7714B0730E4","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"313037010F616263643130383D64636261313038010F616263643130393D64636261313039010F616263643131303D64636261313130010F616263643131313D64636261313131010F616263643131323D64636261313132010F616263643131333D64636261313133010F616263643131343D64636261313134010F616263643131353D64636261313135010F616263643131363D64636261313136010F616263643131373D64636261313137010F616263643131383D64636261313138010F616263643131393D64636261313139010F616263643132303D64636261313230010F616263643132313D64636261313231010F616263643132323D64636261313232010F616263643132333D64636261313233010F616263643132343D64636261313234010F616263643132353D64636261313235010F616263643132363D64636261313236010F616263643132373D64636261313237010F616263643132383D64636261313238010F616263643132393D64636261313239010F616263643133303D64636261313330010F616263643133313D64636261313331010F616263643133323D64636261313332010F616263643133333D64636261313333010F616263643133343D64636261313334010F616263643133353D64636261313335010F616263643133363D64636261313336010F616263643133373D646362613133","proof":{"aunts":["0FD794B3506B9E92CDE3703F7189D42167E77095","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"37010F616263643133383D64636261313338010F616263643133393D64636261313339010F616263643134303D64636261313430010F616263643134313D64636261313431010F616263643134323D64636261313432010F616263643134333D64636261313433010F616263643134343D64636261313434010F616263643134353D64636261313435010F616263643134363D64636261313436010F616263643134373D64636261313437010F616263643134383D64636261313438010F616263643134393D64636261313439010F616263643135303D64636261313530010F616263643135313D64636261313531010F616263643135323D64636261313532010F616263643135333D64636261313533010F616263643135343D64636261313534010F616263643135353D646362613135350100000000","proof":{"aunts":["50CBDC078A660EAE3442BA355BE10EE0D04408D1","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.645Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]} | |||
{"time":"2016-12-18T05:05:43.645Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"E815E0A63B7EEE7894DE2D72372A7C393434AC8ACCC46B60C628910F73351806D55A59994F08B454BFD71EDAA0CA95733CA47E37FFDAF9AAA2431A8160176E01"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.647Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]} | |||
{"time":"2016-12-18T05:05:43.647Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"9AAC3F3A118EE039EB460E9E5308D490D671C7490309BD5D62B5F392205C7E420DFDAF90F08294FF36BE8A9AA5CC203C1F2088B42D2BB8EE40A45F2BB5C54D0A"]}}],"peer_key":""}]} | |||
{"time":"2016-12-18T05:05:43.648Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]} |
@@ -0,0 +1,127 @@
package consensus | |||
import ( | |||
"time" | |||
. "github.com/tendermint/go-common" | |||
) | |||
var ( | |||
tickTockBufferSize = 10 | |||
) | |||
// TimeoutTicker is a timer that schedules timeouts | |||
// conditional on the height/round/step in the timeoutInfo. | |||
// The timeoutInfo.Duration may be non-positive. | |||
type TimeoutTicker interface { | |||
Start() (bool, error) | |||
Stop() bool | |||
Chan() <-chan timeoutInfo // on which to receive a timeout | |||
ScheduleTimeout(ti timeoutInfo) // reset the timer | |||
} | |||
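// Usage sketch (illustrative, not part of the original change): a caller owns a
// TimeoutTicker, schedules a timeout for a given height/round/step, and blocks
// on Chan() until it fires. The timeoutInfo fields used here (Duration, Height,
// Round, Step) are the ones referenced by timeoutRoutine below; the concrete
// values and the function name are placeholders.
func exampleTimeoutTickerUsage() {
	tt := NewTimeoutTicker()
	tt.Start()
	defer tt.Stop()
	// schedule a 500ms timeout for height 1, round 0, step 1
	tt.ScheduleTimeout(timeoutInfo{Duration: 500 * time.Millisecond, Height: 1, Round: 0, Step: 1})
	ti := <-tt.Chan() // received once the timer fires
	log.Info("Timeout fired", "height", ti.Height, "round", ti.Round, "step", ti.Step)
}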
// timeoutTicker wraps time.Timer, | |||
// scheduling timeouts only for greater height/round/step | |||
// than what it's already seen. | |||
// Timeouts are scheduled along the tickChan, | |||
// and fired on the tockChan. | |||
type timeoutTicker struct { | |||
BaseService | |||
timer *time.Timer | |||
tickChan chan timeoutInfo | |||
tockChan chan timeoutInfo | |||
} | |||
func NewTimeoutTicker() TimeoutTicker { | |||
tt := &timeoutTicker{ | |||
timer: time.NewTimer(0), | |||
tickChan: make(chan timeoutInfo, tickTockBufferSize), | |||
tockChan: make(chan timeoutInfo, tickTockBufferSize), | |||
} | |||
tt.stopTimer() // don't want to fire until the first scheduled timeout | |||
tt.BaseService = *NewBaseService(log, "TimeoutTicker", tt) | |||
return tt | |||
} | |||
func (t *timeoutTicker) OnStart() error { | |||
t.BaseService.OnStart() | |||
go t.timeoutRoutine() | |||
return nil | |||
} | |||
func (t *timeoutTicker) OnStop() { | |||
t.BaseService.OnStop() | |||
t.stopTimer() | |||
} | |||
func (t *timeoutTicker) Chan() <-chan timeoutInfo { | |||
return t.tockChan | |||
} | |||
// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) { | |||
t.tickChan <- ti | |||
} | |||
//------------------------------------------------------------- | |||
// stop the timer and drain if necessary | |||
func (t *timeoutTicker) stopTimer() { | |||
// Stop() returns false if the timer already fired or was stopped
if !t.timer.Stop() { | |||
select { | |||
case <-t.timer.C: | |||
default: | |||
log.Debug("Timer already stopped") | |||
} | |||
} | |||
} | |||
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() { | |||
log.Debug("Starting timeout routine") | |||
var ti timeoutInfo | |||
for { | |||
select { | |||
case newti := <-t.tickChan: | |||
log.Debug("Received tick", "old_ti", ti, "new_ti", newti) | |||
// ignore tickers for old height/round/step | |||
if newti.Height < ti.Height { | |||
continue | |||
} else if newti.Height == ti.Height { | |||
if newti.Round < ti.Round { | |||
continue | |||
} else if newti.Round == ti.Round { | |||
if ti.Step > 0 && newti.Step <= ti.Step { | |||
continue | |||
} | |||
} | |||
} | |||
// stop the last timer | |||
t.stopTimer() | |||
// update timeoutInfo and reset timer | |||
// NOTE time.Timer allows duration to be non-positive | |||
ti = newti | |||
t.timer.Reset(ti.Duration) | |||
log.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) | |||
case <-t.timer.C: | |||
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) | |||
// the goroutine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine. | |||
// We can eliminate it by merging the timeoutRoutine into receiveRoutine | |||
// and managing the timeouts ourselves with a millisecond ticker | |||
go func(toi timeoutInfo) { t.tockChan <- toi }(ti) | |||
case <-t.Quit: | |||
return | |||
} | |||
} | |||
} |
@@ -1,78 +0,0 @@
package consensus | |||
import ( | |||
"io/ioutil" | |||
"os" | |||
"path" | |||
"strings" | |||
"testing" | |||
. "github.com/tendermint/go-common" | |||
) | |||
var testTxt = `{"time":"2016-01-16T04:42:00.390Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrevote"}]} | |||
{"time":"2016-01-16T04:42:00.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":1,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"4CC6845A128E723A299B470CCBB2A158612AA51321447F6492F3DA57D135C27FCF4124B3B19446A248252BDA45B152819C76AAA5FD35E1C07091885CE6955E05"}}],"peer_key":""}]} | |||
{"time":"2016-01-16T04:42:00.392Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrecommit"}]} | |||
{"time":"2016-01-16T04:42:00.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":2,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"1B9924E010F47E0817695DFE462C531196E5A12632434DE12180BBA3EFDAD6B3960FDB9357AFF085EB61729A7D4A6AD8408555D7569C87D9028F280192FD4E05"}}],"peer_key":""}]} | |||
{"time":"2016-01-16T04:42:00.393Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepCommit"}]} | |||
{"time":"2016-01-16T04:42:00.395Z","msg":[1,{"height":28220,"round":0,"step":"RoundStepNewHeight"}]}` | |||
func TestSeek(t *testing.T) { | |||
f, err := ioutil.TempFile(os.TempDir(), "seek_test_") | |||
if err != nil { | |||
panic(err) | |||
} | |||
stat, _ := f.Stat() | |||
name := stat.Name() | |||
_, err = f.WriteString(testTxt) | |||
if err != nil { | |||
panic(err) | |||
} | |||
f.Close() | |||
wal, err := NewWAL(path.Join(os.TempDir(), name), config.GetBool("cswal_light")) | |||
if err != nil { | |||
panic(err) | |||
} | |||
keyWord := "Precommit" | |||
n, err := wal.SeekFromEnd(func(b []byte) bool { | |||
if strings.Contains(string(b), keyWord) { | |||
return true | |||
} | |||
return false | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
// confirm n | |||
spl := strings.Split(testTxt, "\n") | |||
var i int | |||
var s string | |||
for i, s = range spl { | |||
if strings.Contains(s, keyWord) { | |||
break | |||
} | |||
} | |||
// n is lines from the end. | |||
spl = spl[i:] | |||
if n != len(spl) { | |||
panic(Fmt("Wrong nLines. Got %d, expected %d", n, len(spl))) | |||
} | |||
b, err := ioutil.ReadAll(wal.fp) | |||
if err != nil { | |||
panic(err) | |||
} | |||
// first char is a \n | |||
spl2 := strings.Split(strings.Trim(string(b), "\n"), "\n") | |||
for i, s := range spl { | |||
if s != spl2[i] { | |||
panic(Fmt("Mismatch. Got %s, expected %s", spl2[i], s)) | |||
} | |||
} | |||
} |
@@ -1,9 +0,0 @@
package proxy | |||
type State interface { | |||
// TODO | |||
} | |||
type BlockStore interface { | |||
// TODO | |||
} |
@@ -0,0 +1,25 @@
package core | |||
import ( | |||
ctypes "github.com/tendermint/tendermint/rpc/core/types" | |||
) | |||
//----------------------------------------------------------------------------- | |||
func ABCIQuery(query []byte) (*ctypes.ResultABCIQuery, error) { | |||
res := proxyAppQuery.QuerySync(query) | |||
return &ctypes.ResultABCIQuery{res}, nil | |||
} | |||
func ABCIInfo() (*ctypes.ResultABCIInfo, error) { | |||
res, err := proxyAppQuery.InfoSync() | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &ctypes.ResultABCIInfo{ | |||
Data: res.Data, | |||
Version: res.Version, | |||
LastBlockHeight: res.LastBlockHeight, | |||
LastBlockAppHash: res.LastBlockAppHash, | |||
}, nil | |||
} |
@@ -1,17 +0,0 @@
package core | |||
import ( | |||
ctypes "github.com/tendermint/tendermint/rpc/core/types" | |||
) | |||
//----------------------------------------------------------------------------- | |||
func TMSPQuery(query []byte) (*ctypes.ResultTMSPQuery, error) { | |||
res := proxyAppQuery.QuerySync(query) | |||
return &ctypes.ResultTMSPQuery{res}, nil | |||
} | |||
func TMSPInfo() (*ctypes.ResultTMSPInfo, error) { | |||
res := proxyAppQuery.InfoSync() | |||
return &ctypes.ResultTMSPInfo{res}, nil | |||
} |
@@ -0,0 +1,18 @@
package core_grpc | |||
import ( | |||
core "github.com/tendermint/tendermint/rpc/core" | |||
context "golang.org/x/net/context" | |||
) | |||
type broadcastAPI struct { | |||
} | |||
func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { | |||
res, err := core.BroadcastTxCommit(req.Tx) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &ResponseBroadcastTx{res.CheckTx, res.DeliverTx}, nil | |||
} |
@@ -0,0 +1,44 @@
package core_grpc | |||
import ( | |||
"fmt" | |||
"net" | |||
"strings" | |||
"time" | |||
"google.golang.org/grpc" | |||
. "github.com/tendermint/go-common" | |||
) | |||
// Start the grpcServer in a goroutine
func StartGRPCServer(protoAddr string) (net.Listener, error) { | |||
parts := strings.SplitN(protoAddr, "://", 2) | |||
if len(parts) != 2 { | |||
return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr) | |||
} | |||
proto, addr := parts[0], parts[1] | |||
ln, err := net.Listen(proto, addr) | |||
if err != nil { | |||
return nil, err | |||
} | |||
grpcServer := grpc.NewServer() | |||
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{}) | |||
go grpcServer.Serve(ln) | |||
return ln, nil | |||
} | |||
// Start the client by dialing the server | |||
func StartGRPCClient(protoAddr string) BroadcastAPIClient { | |||
conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return NewBroadcastAPIClient(conn) | |||
} | |||
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { | |||
return Connect(addr) | |||
} |
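A minimal end-to-end sketch of the two helpers above, assuming a free local port and a node whose core.BroadcastTxCommit path is ready to serve requests; the listen address and transaction bytes are placeholders:

package main

import (
	"fmt"

	"golang.org/x/net/context"

	core_grpc "github.com/tendermint/tendermint/rpc/grpc"
)

func main() {
	addr := "tcp://localhost:36656" // assumed free port

	// start the broadcast server in the background
	if _, err := core_grpc.StartGRPCServer(addr); err != nil {
		panic(err)
	}

	// dial it with the generated client and submit a transaction
	client := core_grpc.StartGRPCClient(addr)
	res, err := client.BroadcastTx(context.Background(),
		&core_grpc.RequestBroadcastTx{Tx: []byte("some tx")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("check tx code: %d, deliver tx code: %d\n", res.CheckTx.Code, res.DeliverTx.Code)
}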
@ -0,0 +1,3 @@ | |||
#! /bin/bash | |||
protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto |
@ -0,0 +1,174 @@ | |||
// Code generated by protoc-gen-go. | |||
// source: types.proto | |||
// DO NOT EDIT! | |||
/* | |||
Package core_grpc is a generated protocol buffer package. | |||
It is generated from these files: | |||
types.proto | |||
It has these top-level messages: | |||
RequestBroadcastTx | |||
ResponseBroadcastTx | |||
*/ | |||
package core_grpc | |||
import proto "github.com/golang/protobuf/proto" | |||
import fmt "fmt" | |||
import math "math" | |||
import types "github.com/tendermint/abci/types" | |||
import ( | |||
context "golang.org/x/net/context" | |||
grpc "google.golang.org/grpc" | |||
) | |||
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ = proto.Marshal | |||
var _ = fmt.Errorf | |||
var _ = math.Inf | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the proto package it is being compiled against. | |||
// A compilation error at this line likely means your copy of the | |||
// proto package needs to be updated. | |||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | |||
type RequestBroadcastTx struct { | |||
Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` | |||
} | |||
func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } | |||
func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } | |||
func (*RequestBroadcastTx) ProtoMessage() {} | |||
func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | |||
func (m *RequestBroadcastTx) GetTx() []byte { | |||
if m != nil { | |||
return m.Tx | |||
} | |||
return nil | |||
} | |||
type ResponseBroadcastTx struct { | |||
CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` | |||
DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` | |||
} | |||
func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } | |||
func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } | |||
func (*ResponseBroadcastTx) ProtoMessage() {} | |||
func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | |||
func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { | |||
if m != nil { | |||
return m.CheckTx | |||
} | |||
return nil | |||
} | |||
func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { | |||
if m != nil { | |||
return m.DeliverTx | |||
} | |||
return nil | |||
} | |||
func init() { | |||
proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") | |||
proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") | |||
} | |||
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ context.Context | |||
var _ grpc.ClientConn | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the grpc package it is being compiled against. | |||
const _ = grpc.SupportPackageIsVersion4 | |||
// Client API for BroadcastAPI service | |||
type BroadcastAPIClient interface { | |||
BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) | |||
} | |||
type broadcastAPIClient struct { | |||
cc *grpc.ClientConn | |||
} | |||
func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { | |||
return &broadcastAPIClient{cc} | |||
} | |||
func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { | |||
out := new(ResponseBroadcastTx) | |||
err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
// Server API for BroadcastAPI service | |||
type BroadcastAPIServer interface { | |||
BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) | |||
} | |||
func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { | |||
s.RegisterService(&_BroadcastAPI_serviceDesc, srv) | |||
} | |||
func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(RequestBroadcastTx) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/core_grpc.BroadcastAPI/BroadcastTx", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ | |||
ServiceName: "core_grpc.BroadcastAPI", | |||
HandlerType: (*BroadcastAPIServer)(nil), | |||
Methods: []grpc.MethodDesc{ | |||
{ | |||
MethodName: "BroadcastTx", | |||
Handler: _BroadcastAPI_BroadcastTx_Handler, | |||
}, | |||
}, | |||
Streams: []grpc.StreamDesc{}, | |||
Metadata: "types.proto", | |||
} | |||
func init() { proto.RegisterFile("types.proto", fileDescriptor0) } | |||
var fileDescriptor0 = []byte{ | |||
// 226 bytes of a gzipped FileDescriptorProto | |||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, | |||
0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f, | |||
0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, | |||
0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xc9, 0x2d, 0x2e, 0xd0, 0x07, | |||
0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xa4, 0xc2, 0x25, 0x14, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xe2, | |||
0x54, 0x94, 0x9f, 0x98, 0x92, 0x9c, 0x58, 0x5c, 0x12, 0x52, 0x21, 0xc4, 0xc7, 0xc5, 0x54, 0x52, | |||
0x21, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x13, 0xc4, 0x54, 0x52, 0xa1, 0x54, 0xc7, 0x25, 0x1c, 0x94, | |||
0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x8a, 0xac, 0xcc, 0x90, 0x8b, 0x23, 0x39, 0x23, 0x35, 0x39, | |||
0x3b, 0x1e, 0xaa, 0x98, 0xdb, 0x48, 0x4c, 0x0f, 0x62, 0x38, 0x4c, 0xb5, 0x33, 0x48, 0x3a, 0xa4, | |||
0x22, 0x88, 0x3d, 0x19, 0xc2, 0x10, 0x32, 0xe1, 0xe2, 0x4c, 0x2c, 0x28, 0x48, 0xcd, 0x4b, 0x01, | |||
0xe9, 0x61, 0x02, 0xeb, 0x11, 0x47, 0xd3, 0xe3, 0x08, 0x96, 0x0f, 0xa9, 0x08, 0xe2, 0x48, 0x84, | |||
0xb2, 0x8c, 0x62, 0xb8, 0x78, 0xe0, 0xf6, 0x3a, 0x06, 0x78, 0x0a, 0xf9, 0x70, 0x71, 0x23, 0xbb, | |||
0x43, 0x56, 0x0f, 0xee, 0x7d, 0x3d, 0x4c, 0xdf, 0x48, 0xc9, 0xa1, 0x48, 0x63, 0x78, 0x23, 0x89, | |||
0x0d, 0x1c, 0x14, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x73, 0x87, 0xb0, 0x52, 0x01, | |||
0x00, 0x00, | |||
} |
@ -0,0 +1,29 @@ | |||
syntax = "proto3"; | |||
package core_grpc; | |||
import "github.com/tendermint/abci/types/types.proto"; | |||
//---------------------------------------- | |||
// Message types | |||
//---------------------------------------- | |||
// Request types | |||
message RequestBroadcastTx { | |||
bytes tx = 1; | |||
} | |||
//---------------------------------------- | |||
// Response types | |||
message ResponseBroadcastTx{ | |||
types.ResponseCheckTx check_tx = 1; | |||
types.ResponseDeliverTx deliver_tx = 2; | |||
} | |||
//---------------------------------------- | |||
// Service Definition | |||
service BroadcastAPI { | |||
rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ; | |||
} |
@ -0,0 +1,24 @@ | |||
package rpctest | |||
import ( | |||
"testing" | |||
"golang.org/x/net/context" | |||
"github.com/tendermint/tendermint/rpc/grpc" | |||
) | |||
//------------------------------------------- | |||
func TestBroadcastTx(t *testing.T) { | |||
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
if res.CheckTx.Code != 0 { | |||
t.Fatalf("Non-zero check tx code: %d", res.CheckTx.Code) | |||
} | |||
if res.DeliverTx.Code != 0 { | |||
t.Fatalf("Non-zero append tx code: %d", res.DeliverTx.Code) | |||
} | |||
} |
@ -0,0 +1,13 @@ | |||
#! /bin/bash | |||
go get github.com/tendermint/abci/... | |||
# get the abci commit used by tendermint | |||
COMMIT=`bash scripts/glide/parse.sh abci` | |||
echo "Checking out vendored commit for abci: $COMMIT" | |||
cd $GOPATH/src/github.com/tendermint/abci | |||
git checkout $COMMIT | |||
glide install | |||
go install ./cmd/... |
@ -1,12 +0,0 @@ | |||
#! /bin/bash | |||
go get github.com/tendermint/tmsp/... | |||
# get the tmsp commit used by tendermint | |||
COMMIT=`bash scripts/glide/parse.sh $(pwd)/glide.lock tmsp` | |||
cd $GOPATH/src/github.com/tendermint/tmsp | |||
git checkout $COMMIT | |||
go install ./cmd/... | |||
@ -0,0 +1,19 @@ | |||
#! /bin/bash | |||
set -u | |||
function toHex() { | |||
echo -n $1 | hexdump -ve '1/1 "%.2X"' | |||
} | |||
N=$1 | |||
PORT=$2 | |||
for i in `seq 1 $N`; do | |||
# store key value pair | |||
KEY="abcd$i" | |||
VALUE="dcba$i" | |||
echo "$KEY:$VALUE" | |||
curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=\"$(toHex $KEY=$VALUE)\" | |||
done | |||
@ -0,0 +1,55 @@ | |||
package state | |||
import ( | |||
. "github.com/tendermint/go-common" | |||
) | |||
type ( | |||
ErrInvalidBlock error | |||
ErrProxyAppConn error | |||
ErrUnknownBlock struct { | |||
height int | |||
} | |||
ErrBlockHashMismatch struct { | |||
coreHash []byte | |||
appHash []byte | |||
height int | |||
} | |||
ErrAppBlockHeightTooHigh struct { | |||
coreHeight int | |||
appHeight int | |||
} | |||
ErrLastStateMismatch struct { | |||
height int | |||
core []byte | |||
app []byte | |||
} | |||
ErrStateMismatch struct { | |||
got *State | |||
expected *State | |||
} | |||
) | |||
func (e ErrUnknownBlock) Error() string { | |||
return Fmt("Could not find block #%d", e.height) | |||
} | |||
func (e ErrBlockHashMismatch) Error() string { | |||
return Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.appHash, e.coreHash, e.height) | |||
} | |||
func (e ErrAppBlockHeightTooHigh) Error() string { | |||
return Fmt("App block height (%d) is higher than core (%d)", e.appHeight, e.coreHeight) | |||
} | |||
func (e ErrLastStateMismatch) Error() string { | |||
return Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.height, e.core, e.app) | |||
} | |||
func (e ErrStateMismatch) Error() string { | |||
return Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.got, e.expected) | |||
} |
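A short illustrative sketch of how a caller might branch on these typed errors; describeReplayError is a hypothetical helper, only the error types themselves come from the file above:

package state

import "fmt"

// describeReplayError is a hypothetical helper showing how the typed errors
// above can be distinguished with a type switch (their Error methods have
// value receivers, so they are returned and matched as values).
func describeReplayError(err error) string {
	switch e := err.(type) {
	case ErrUnknownBlock:
		return fmt.Sprintf("missing block: %v", e)
	case ErrBlockHashMismatch:
		return fmt.Sprintf("app/core hash mismatch: %v", e)
	case ErrAppBlockHeightTooHigh:
		return fmt.Sprintf("app is ahead of core: %v", e)
	case ErrLastStateMismatch:
		return fmt.Sprintf("last state mismatch: %v", e)
	case ErrStateMismatch:
		return fmt.Sprintf("replayed state mismatch: %v", e)
	default:
		return fmt.Sprintf("unexpected error: %v", e)
	}
}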
@ -0,0 +1,210 @@ | |||
package state | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"path" | |||
"testing" | |||
"github.com/tendermint/tendermint/config/tendermint_test" | |||
// . "github.com/tendermint/go-common" | |||
cfg "github.com/tendermint/go-config" | |||
"github.com/tendermint/go-crypto" | |||
dbm "github.com/tendermint/go-db" | |||
"github.com/tendermint/tendermint/proxy" | |||
"github.com/tendermint/tendermint/types" | |||
"github.com/tendermint/abci/example/dummy" | |||
) | |||
var ( | |||
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("handshake_test")) | |||
chainID = "handshake_chain" | |||
nBlocks = 5 | |||
mempool = MockMempool{} | |||
testPartSize = 65536 | |||
) | |||
//--------------------------------------- | |||
// Test block execution | |||
func TestExecBlock(t *testing.T) { | |||
// TODO | |||
} | |||
//--------------------------------------- | |||
// Test handshake/replay | |||
// Sync from scratch | |||
func TestHandshakeReplayAll(t *testing.T) { | |||
testHandshakeReplay(t, 0) | |||
} | |||
// Sync many, not from scratch | |||
func TestHandshakeReplaySome(t *testing.T) { | |||
testHandshakeReplay(t, 1) | |||
} | |||
// Sync from lagging by one | |||
func TestHandshakeReplayOne(t *testing.T) { | |||
testHandshakeReplay(t, nBlocks-1) | |||
} | |||
// Sync from caught up | |||
func TestHandshakeReplayNone(t *testing.T) { | |||
testHandshakeReplay(t, nBlocks) | |||
} | |||
// Make some blocks. Start a fresh app and apply n blocks. Then restart the app and sync it up with the remaining blocks | |||
func testHandshakeReplay(t *testing.T, n int) { | |||
config := tendermint_test.ResetConfig("proxy_test_") | |||
state, store := stateAndStore(config) | |||
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1"))) | |||
clientCreator2 := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2"))) | |||
proxyApp := proxy.NewAppConns(config, clientCreator, NewHandshaker(config, state, store)) | |||
if _, err := proxyApp.Start(); err != nil { | |||
t.Fatalf("Error starting proxy app connections: %v", err) | |||
} | |||
chain := makeBlockchain(t, proxyApp, state) | |||
store.chain = chain | |||
latestAppHash := state.AppHash | |||
proxyApp.Stop() | |||
if n > 0 { | |||
// start a new app without handshake, play n blocks | |||
proxyApp = proxy.NewAppConns(config, clientCreator2, nil) | |||
if _, err := proxyApp.Start(); err != nil { | |||
t.Fatalf("Error starting proxy app connections: %v", err) | |||
} | |||
state2, _ := stateAndStore(config) | |||
for i := 0; i < n; i++ { | |||
block := chain[i] | |||
err := state2.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
} | |||
proxyApp.Stop() | |||
} | |||
// now start it with the handshake | |||
handshaker := NewHandshaker(config, state, store) | |||
proxyApp = proxy.NewAppConns(config, clientCreator2, handshaker) | |||
if _, err := proxyApp.Start(); err != nil { | |||
t.Fatalf("Error starting proxy app connections: %v", err) | |||
} | |||
// get the latest app hash from the app | |||
res, err := proxyApp.Query().InfoSync() | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
// the app hash should be synced up | |||
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) { | |||
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash) | |||
} | |||
if handshaker.nBlocks != nBlocks-n { | |||
t.Fatalf("Expected handshake to sync %d blocks, got %d", nBlocks-n, handshaker.nBlocks) | |||
} | |||
} | |||
//-------------------------- | |||
// utils for making blocks | |||
// make some bogus txs | |||
func txsFunc(blockNum int) (txs []types.Tx) { | |||
for i := 0; i < 10; i++ { | |||
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)})) | |||
} | |||
return txs | |||
} | |||
// sign a commit vote | |||
func signCommit(height, round int, hash []byte, header types.PartSetHeader) *types.Vote { | |||
vote := &types.Vote{ | |||
ValidatorIndex: 0, | |||
ValidatorAddress: privKey.PubKey().Address(), | |||
Height: height, | |||
Round: round, | |||
Type: types.VoteTypePrecommit, | |||
BlockID: types.BlockID{hash, header}, | |||
} | |||
sig := privKey.Sign(types.SignBytes(chainID, vote)) | |||
vote.Signature = sig | |||
return vote | |||
} | |||
// make a blockchain with one validator | |||
func makeBlockchain(t *testing.T, proxyApp proxy.AppConns, state *State) (blockchain []*types.Block) { | |||
prevHash := state.LastBlockID.Hash | |||
lastCommit := new(types.Commit) | |||
prevParts := types.PartSetHeader{} | |||
valHash := state.Validators.Hash() | |||
prevBlockID := types.BlockID{prevHash, prevParts} | |||
for i := 1; i < nBlocks+1; i++ { | |||
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit, | |||
prevBlockID, valHash, state.AppHash, testPartSize) | |||
fmt.Println(i) | |||
fmt.Println(prevBlockID) | |||
fmt.Println(block.LastBlockID) | |||
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool) | |||
if err != nil { | |||
t.Fatal(i, err) | |||
} | |||
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators) | |||
vote := signCommit(i, 0, block.Hash(), parts.Header()) | |||
_, err = voteSet.AddVote(vote) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
blockchain = append(blockchain, block) | |||
prevHash = block.Hash() | |||
prevParts = parts.Header() | |||
lastCommit = voteSet.MakeCommit() | |||
prevBlockID = types.BlockID{prevHash, prevParts} | |||
} | |||
return blockchain | |||
} | |||
// fresh state and mock store | |||
func stateAndStore(config cfg.Config) (*State, *mockBlockStore) { | |||
stateDB := dbm.NewMemDB() | |||
return MakeGenesisState(stateDB, &types.GenesisDoc{ | |||
ChainID: chainID, | |||
Validators: []types.GenesisValidator{ | |||
types.GenesisValidator{privKey.PubKey(), 10000, "test"}, | |||
}, | |||
AppHash: nil, | |||
}), NewMockBlockStore(config, nil) | |||
} | |||
//---------------------------------- | |||
// mock block store | |||
type mockBlockStore struct { | |||
config cfg.Config | |||
chain []*types.Block | |||
} | |||
func NewMockBlockStore(config cfg.Config, chain []*types.Block) *mockBlockStore { | |||
return &mockBlockStore{config, chain} | |||
} | |||
func (bs *mockBlockStore) Height() int { return len(bs.chain) } | |||
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] } | |||
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta { | |||
block := bs.chain[height-1] | |||
return &types.BlockMeta{ | |||
Hash: block.Hash(), | |||
Header: block.Header, | |||
PartsHeader: block.MakePartSet(bs.config.GetInt("block_part_size")).Header(), | |||
} | |||
} |
@ -0,0 +1,42 @@ | |||
package state | |||
import ( | |||
"testing" | |||
dbm "github.com/tendermint/go-db" | |||
"github.com/tendermint/tendermint/config/tendermint_test" | |||
) | |||
func TestStateCopyEquals(t *testing.T) { | |||
config := tendermint_test.ResetConfig("state_") | |||
// Get State db | |||
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir")) | |||
state := GetState(config, stateDB) | |||
stateCopy := state.Copy() | |||
if !state.Equals(stateCopy) { | |||
t.Fatal("expected state and its copy to be identical. got %v\n expected %v\n", stateCopy, state) | |||
} | |||
stateCopy.LastBlockHeight += 1 | |||
if state.Equals(stateCopy) { | |||
t.Fatal("expected states to be different. got same %v", state) | |||
} | |||
} | |||
func TestStateSaveLoad(t *testing.T) { | |||
config := tendermint_test.ResetConfig("state_") | |||
// Get State db | |||
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir")) | |||
state := GetState(config, stateDB) | |||
state.LastBlockHeight += 1 | |||
state.Save() | |||
loadedState := LoadState(stateDB) | |||
if !state.Equals(loadedState) { | |||
t.Fatal("expected state and its copy to be identical. got %v\n expected %v\n", loadedState, state) | |||
} | |||
} |
@ -1,3 +1,4 @@ | |||
killall tendermint | |||
killall dummy | |||
killall counter | |||
rm -rf ~/.tendermint_app |
@ -0,0 +1,36 @@ | |||
package main | |||
import ( | |||
"encoding/hex" | |||
"fmt" | |||
"os" | |||
"golang.org/x/net/context" | |||
"github.com/tendermint/go-wire" | |||
"github.com/tendermint/tendermint/rpc/grpc" | |||
) | |||
var grpcAddr = "tcp://localhost:36656" | |||
func main() { | |||
args := os.Args | |||
if len(args) == 1 { | |||
fmt.Println("Must enter a transaction to send (hex)") | |||
os.Exit(1) | |||
} | |||
tx := args[1] | |||
txBytes, err := hex.DecodeString(tx) | |||
if err != nil { | |||
fmt.Println("Invalid hex", err) | |||
os.Exit(1) | |||
} | |||
clientGRPC := core_grpc.StartGRPCClient(grpcAddr) | |||
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: txBytes}) | |||
if err != nil { | |||
fmt.Println(err) | |||
os.Exit(1) | |||
} | |||
fmt.Println(string(wire.JSONBytes(res))) | |||
} |
@ -0,0 +1,26 @@ | |||
#! /bin/bash | |||
set -eu | |||
# grab glide for dependency mgmt | |||
go get github.com/Masterminds/glide | |||
# grab network monitor, install mintnet, netmon | |||
# these might err | |||
echo "... fetching repos. ignore go get errors" | |||
set +e | |||
go get github.com/tendermint/network_testing | |||
go get github.com/tendermint/mintnet | |||
go get github.com/tendermint/netmon | |||
set -e | |||
# install vendored deps | |||
echo "GOPATH $GOPATH" | |||
cd $GOPATH/src/github.com/tendermint/mintnet | |||
echo "... install mintnet dir $(pwd)" | |||
glide install | |||
go install | |||
cd $GOPATH/src/github.com/tendermint/netmon | |||
echo "... install netmon dir $(pwd)" | |||
glide install | |||
go install |
@ -0,0 +1,34 @@ | |||
#! /bin/bash | |||
set -eu | |||
# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo | |||
DATACENTER=single | |||
VALSETSIZE=4 | |||
BLOCKSIZE=8092 | |||
TX_SIZE=200 | |||
NTXS=$((BLOCKSIZE*4)) | |||
RESULTSDIR=results | |||
CLOUD_PROVIDER=digitalocean | |||
set +u | |||
if [[ "$MACH_PREFIX" == "" ]]; then | |||
MACH_PREFIX=mach | |||
fi | |||
set -u | |||
export TMHEAD=`git rev-parse --abbrev-ref HEAD` | |||
export TM_IMAGE="tendermint/tmbase" | |||
cd $GOPATH/src/github.com/tendermint/network_testing | |||
echo "... running network test $(pwd)" | |||
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER | |||
# TODO: publish result! | |||
# cleanup | |||
echo "... destroying machines" | |||
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE] | |||
@ -1,58 +1,8 @@ | |||
#! /bin/bash | |||
set -eu | |||
# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo | |||
DATACENTER=single | |||
VALSETSIZE=4 | |||
BLOCKSIZE=8092 | |||
TX_SIZE=200 | |||
NTXS=$((BLOCKSIZE*4)) | |||
RESULTSDIR=results | |||
CLOUD_PROVIDER=digitalocean | |||
set +u | |||
if [[ "$MACH_PREFIX" == "" ]]; then | |||
MACH_PREFIX=mach | |||
fi | |||
set -u | |||
export TMHEAD=`git rev-parse --abbrev-ref HEAD` | |||
export TM_IMAGE="tendermint/tmbase" | |||
# grab glide for dependency mgmt | |||
go get github.com/Masterminds/glide | |||
# grab network monitor, install mintnet, netmon | |||
# these might err | |||
echo "... fetching repos. ignore go get errors" | |||
set +e | |||
go get github.com/tendermint/network_testing | |||
go get github.com/tendermint/mintnet | |||
go get github.com/tendermint/netmon | |||
set -e | |||
# install vendored deps | |||
echo "GOPATH $GOPATH" | |||
cd $GOPATH/src/github.com/tendermint/mintnet | |||
echo "... install mintnet dir $(pwd)" | |||
glide install | |||
go install | |||
cd $GOPATH/src/github.com/tendermint/netmon | |||
echo "... install netmon dir $(pwd)" | |||
glide install | |||
go install | |||
cd $GOPATH/src/github.com/tendermint/network_testing | |||
echo "... running network test $(pwd)" | |||
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER | |||
# TODO: publish result! | |||
# cleanup | |||
echo "... destroying machines" | |||
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE] | |||
# install mintnet, netmon, fetch network_testing | |||
bash test/net/setup.sh | |||
# start the testnet | |||
bash test/net/start.sh |
@ -0,0 +1,53 @@ | |||
#! /bin/bash | |||
set -u | |||
N=$1 | |||
################################################################### | |||
# wait for all peers to come online | |||
# for each peer: | |||
# wait to have N-1 peers | |||
# wait to be at height > 1 | |||
################################################################### | |||
# wait for everyone to come online | |||
echo "Waiting for nodes to come online" | |||
for i in `seq 1 $N`; do | |||
addr=$(test/p2p/ip.sh $i):46657 | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
while [ "$ERR" != 0 ]; do | |||
sleep 1 | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
done | |||
echo "... node $i is up" | |||
done | |||
echo "" | |||
# wait for each of them to sync up | |||
for i in `seq 1 $N`; do | |||
addr=$(test/p2p/ip.sh $i):46657 | |||
N_1=$(($N - 1)) | |||
# - assert everyone has N-1 other peers | |||
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'` | |||
while [ "$N_PEERS" != $N_1 ]; do | |||
echo "Waiting for node $i to connect to all peers ..." | |||
sleep 1 | |||
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'` | |||
done | |||
# - assert block height is greater than 1 | |||
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
while [ "$BLOCK_HEIGHT" -le 1 ]; do | |||
echo "Waiting for node $i to commit a block ..." | |||
sleep 1 | |||
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
done | |||
echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT" | |||
done | |||
echo "" | |||
echo "PASS" | |||
echo "" |
@ -0,0 +1,43 @@ | |||
#! /bin/bash | |||
set -eu | |||
set -o pipefail | |||
ID=$1 | |||
########################################### | |||
# | |||
# Wait for peer to catchup to other peers | |||
# | |||
########################################### | |||
addr=$(test/p2p/ip.sh $ID):46657 | |||
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1 | |||
peer_addr=$(test/p2p/ip.sh $peerID):46657 | |||
# get another peer's height | |||
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height` | |||
# get another peer's state | |||
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash` | |||
echo "Other peer is on height $h1 with state $root1" | |||
echo "Waiting for peer $ID to catch up" | |||
# wait for it to sync to past its previous height | |||
set +e | |||
set +o pipefail | |||
h2="0" | |||
while [[ "$h2" -lt "$(($h1+3))" ]]; do | |||
sleep 1 | |||
h2=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
echo "... $h2" | |||
done | |||
# check the app hash | |||
root2=`curl -s $addr/status | jq .result[1].latest_app_hash` | |||
if [[ "$root1" != "$root2" ]]; then | |||
echo "App hash after fast sync does not match. Got $root2; expected $root1" | |||
exit 1 | |||
fi | |||
echo "... fast sync successful" |
@ -1,44 +1,16 @@ | |||
#! /bin/bash | |||
set -eu | |||
set -o pipefail | |||
############################################################### | |||
# for each peer: | |||
# kill peer | |||
# bring it back online via fast sync | |||
# check app hash | |||
############################################################### | |||
DOCKER_IMAGE=$1 | |||
NETWORK_NAME=$2 | |||
N=$3 | |||
PROXY_APP=$4 | |||
ID=$1 | |||
cd $GOPATH/src/github.com/tendermint/tendermint | |||
addr=$(test/p2p/ip.sh $ID):46657 | |||
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1 | |||
peer_addr=$(test/p2p/ip.sh $peerID):46657 | |||
# get another peer's height | |||
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height` | |||
# get another peer's state | |||
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash` | |||
echo "Other peer is on height $h1 with state $root1" | |||
echo "Waiting for peer $ID to catch up" | |||
# wait for it to sync to past its previous height | |||
set +e | |||
set +o pipefail | |||
h2="0" | |||
while [[ "$h2" -lt "$(($h1+3))" ]]; do | |||
sleep 1 | |||
h2=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
echo "... $h2" | |||
# run it on each of them | |||
for i in `seq 1 $N`; do | |||
bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP | |||
done | |||
# check the app hash | |||
root2=`curl -s $addr/status | jq .result[1].latest_app_hash` | |||
if [[ "$root1" != "$root2" ]]; then | |||
echo "App hash after fast sync does not match. Got $root2; expected $root1" | |||
exit 1 | |||
fi | |||
echo "... fast sync successful" |
@ -0,0 +1,38 @@ | |||
#! /bin/bash | |||
set -eu | |||
DOCKER_IMAGE=$1 | |||
NETWORK_NAME=$2 | |||
ID=$3 | |||
N=$4 | |||
PROXY_APP=$5 | |||
############################################################### | |||
# this runs on each peer: | |||
# kill peer | |||
# bring it back online via fast sync | |||
# wait for it to sync and check the app hash | |||
############################################################### | |||
echo "Testing fastsync on node $ID" | |||
# kill peer | |||
set +e # circle sigh :( | |||
docker rm -vf local_testnet_$ID | |||
set -e | |||
# restart peer - should have an empty blockchain | |||
SEEDS="$(test/p2p/ip.sh 1):46656" | |||
for j in `seq 2 $N`; do | |||
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656" | |||
done | |||
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP $SEEDS | |||
# wait for peer to sync and check the app hash | |||
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID" | |||
echo "" | |||
echo "PASS" | |||
echo "" | |||
@ -0,0 +1,48 @@ | |||
#! /bin/bash | |||
set -eu | |||
NUM_OF_PEERS=$1 | |||
# how many attempts for each peer to catch up by height | |||
MAX_ATTEMPTS_TO_CATCH_UP=20 | |||
echo "Waiting for nodes to come online" | |||
set +e | |||
for i in $(seq 1 "$NUM_OF_PEERS"); do | |||
addr=$(test/p2p/ip.sh "$i"):46657 | |||
curl -s "$addr/status" > /dev/null | |||
ERR=$? | |||
while [ "$ERR" != 0 ]; do | |||
sleep 1 | |||
curl -s "$addr/status" > /dev/null | |||
ERR=$? | |||
done | |||
echo "... node $i is up" | |||
done | |||
set -e | |||
# get the first peer's height | |||
addr=$(test/p2p/ip.sh 1):46657 | |||
h1=$(curl -s "$addr/status" | jq .result[1].latest_block_height) | |||
echo "1st peer is on height $h1" | |||
echo "Waiting until other peers reporting a height higher than the 1st one" | |||
for i in $(seq 2 "$NUM_OF_PEERS"); do | |||
attempt=1 | |||
hi=0 | |||
while [[ $hi -le $h1 ]] ; do | |||
addr=$(test/p2p/ip.sh "$i"):46657 | |||
hi=$(curl -s "$addr/status" | jq .result[1].latest_block_height) | |||
echo "... peer $i is on height $hi" | |||
((attempt++)) | |||
if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then | |||
echo "$attempt unsuccessful attempts were made to catch up" | |||
exit 1 | |||
fi | |||
sleep 1 | |||
done | |||
done |
@ -0,0 +1,32 @@ | |||
#! /bin/bash | |||
set -eu | |||
DOCKER_IMAGE=$1 | |||
NETWORK_NAME=$2 | |||
NUM_OF_PEERS=$3 | |||
NUM_OF_CRASHES=$4 | |||
cd "$GOPATH/src/github.com/tendermint/tendermint" | |||
############################################################### | |||
# NUM_OF_CRASHES times: | |||
# restart all peers | |||
# wait for them to sync and check that they are making progress | |||
############################################################### | |||
for i in $(seq 1 "$NUM_OF_CRASHES"); do | |||
echo "" | |||
echo "Restarting all peers! Take $i ..." | |||
# restart all peers | |||
for j in $(seq 1 "$NUM_OF_PEERS"); do | |||
docker stop "local_testnet_$j" | |||
docker start "local_testnet_$j" | |||
done | |||
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" kill_all_$i "test/p2p/kill_all/check_peers.sh $NUM_OF_PEERS" | |||
done | |||
echo "" | |||
echo "PASS" | |||
echo "" |
@ -0,0 +1,12 @@ | |||
#! /bin/bash | |||
set -u | |||
NETWORK_NAME=$1 | |||
N=$2 | |||
for i in `seq 1 $N`; do | |||
docker stop local_testnet_$i | |||
docker rm -vf local_testnet_$i | |||
done | |||
docker network rm $NETWORK_NAME |
@ -0,0 +1,5 @@ | |||
#! /bin/bash | |||
cd $GOPATH/src/github.com/tendermint/tendermint | |||
bash ./test/persist/test_failure_indices.sh |
@ -0,0 +1,104 @@ | |||
#! /bin/bash | |||
export TMROOT=$HOME/.tendermint_persist | |||
rm -rf $TMROOT | |||
tendermint init | |||
function start_procs(){ | |||
name=$1 | |||
indexToFail=$2 | |||
echo "Starting persistent dummy and tendermint" | |||
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" & | |||
PID_DUMMY=$! | |||
if [[ "$indexToFail" == "" ]]; then | |||
# run in background, don't fail | |||
tendermint node --log_level=debug &> tendermint_${name}.log & | |||
PID_TENDERMINT=$! | |||
else | |||
# run in foreground, fail | |||
FAIL_TEST_INDEX=$indexToFail tendermint node --log_level=debug &> tendermint_${name}.log | |||
PID_TENDERMINT=$! | |||
fi | |||
} | |||
function kill_procs(){ | |||
kill -9 $PID_DUMMY $PID_TENDERMINT | |||
wait $PID_DUMMY | |||
wait $PID_TENDERMINT | |||
} | |||
# wait till node is up, send txs | |||
function send_txs(){ | |||
addr="127.0.0.1:46657" | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
while [ "$ERR" != 0 ]; do | |||
sleep 1 | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
done | |||
# send a bunch of txs over a few blocks | |||
echo "Node is up, sending txs" | |||
for i in `seq 1 5`; do | |||
for j in `seq 1 100`; do | |||
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` | |||
curl -s $addr/broadcast_tx_async?tx=0x$tx &> /dev/null | |||
done | |||
sleep 1 | |||
done | |||
} | |||
failsStart=0 | |||
fails=`grep -r "fail.Fail" --include \*.go . | wc -l` | |||
failsEnd=$(($fails-1)) | |||
for failIndex in `seq $failsStart $failsEnd`; do | |||
echo "" | |||
echo "* Test FailIndex $failIndex" | |||
# test failure at failIndex | |||
send_txs & | |||
start_procs 1 $failIndex | |||
# tendermint should fail when it hits the fail index | |||
kill -9 $PID_DUMMY | |||
wait $PID_DUMMY | |||
start_procs 2 | |||
# wait for node to handshake and make a new block | |||
addr="localhost:46657" | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
i=0 | |||
while [ "$ERR" != 0 ]; do | |||
sleep 1 | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
i=$(($i + 1)) | |||
if [[ $i == 10 ]]; then | |||
echo "Timed out waiting for tendermint to start" | |||
exit 1 | |||
fi | |||
done | |||
# wait for a new block | |||
h1=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
h2=$h1 | |||
while [ "$h2" == "$h1" ]; do | |||
sleep 1 | |||
h2=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
done | |||
kill_procs | |||
echo "* Passed Test for FailIndex $failIndex" | |||
echo "" | |||
done | |||
echo "Passed Test: Persistence" |
@ -0,0 +1,70 @@ | |||
#! /bin/bash | |||
export TMROOT=$HOME/.tendermint_persist | |||
rm -rf $TMROOT | |||
tendermint init | |||
function start_procs(){ | |||
name=$1 | |||
echo "Starting persistent dummy and tendermint" | |||
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" & | |||
PID_DUMMY=$! | |||
tendermint node &> tendermint_${name}.log & | |||
PID_TENDERMINT=$! | |||
sleep 5 | |||
} | |||
function kill_procs(){ | |||
kill -9 $PID_DUMMY $PID_TENDERMINT | |||
} | |||
function send_txs(){ | |||
# send a bunch of txs over a few blocks | |||
echo "Sending txs" | |||
for i in `seq 1 5`; do | |||
for j in `seq 1 100`; do | |||
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` | |||
curl -s 127.0.0.1:46657/broadcast_tx_async?tx=0x$tx &> /dev/null | |||
done | |||
sleep 1 | |||
done | |||
} | |||
start_procs 1 | |||
send_txs | |||
kill_procs | |||
start_procs 2 | |||
# wait for node to handshake and make a new block | |||
addr="localhost:46657" | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
i=0 | |||
while [ "$ERR" != 0 ]; do | |||
sleep 1 | |||
curl -s $addr/status > /dev/null | |||
ERR=$? | |||
i=$(($i + 1)) | |||
if [[ $i == 10 ]]; then | |||
echo "Timed out waiting for tendermint to start" | |||
exit 1 | |||
fi | |||
done | |||
# wait for a new block | |||
h1=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
h2=$h1 | |||
while [ "$h2" == "$h1" ]; do | |||
sleep 1 | |||
h2=`curl -s $addr/status | jq .result[1].latest_block_height` | |||
done | |||
kill_procs | |||
sleep 2 | |||
echo "Passed Test: Persistence" |