
Merge pull request #366 from tendermint/release-0.8.0

Release 0.8.0

Refs: pull/311/merge, v0.8.0
Author: Jae Kwon (committed by GitHub)
Parent commit: 764091dfbb

116 changed files with 5254 additions and 2062 deletions
  1. .codecov.yml (+0, -3)
  2. .gitignore (+2, -0)
  3. Makefile (+5, -4)
  4. README.md (+1, -1)
  5. Vagrantfile (+26, -18)
  6. blockchain/pool.go (+7, -7)
  7. blockchain/reactor.go (+17, -16)
  8. blockchain/store.go (+17, -6)
  9. circle.yml (+3, -1)
 10. cmd/tendermint/flags.go (+6, -3)
 11. cmd/tendermint/main.go (+3, -3)
 12. cmd/tendermint/reset_priv_validator.go (+1, -1)
 13. config/tendermint/config.go (+10, -5)
 14. config/tendermint_test/config.go (+19, -12)
 15. consensus/byzantine_test.go (+297, -0)
 16. consensus/common.go (+16, -2)
 17. consensus/common_test.go (+249, -231)
 18. consensus/height_vote_set.go (+35, -11)
 19. consensus/height_vote_set_test.go (+19, -10)
 20. consensus/mempool_test.go (+20, -20)
 21. consensus/reactor.go (+330, -100)
 22. consensus/reactor_test.go (+309, -0)
 23. consensus/replay.go (+73, -74)
 24. consensus/replay_test.go (+81, -65)
 25. consensus/state.go (+236, -250)
 26. consensus/state_test.go (+144, -111)
 27. consensus/test_data/README.md (+5, -5)
 28. consensus/test_data/build.sh (+58, -0)
 29. consensus/test_data/empty_block.cswal (+10, -8)
 30. consensus/test_data/small_block1.cswal (+10, -8)
 31. consensus/test_data/small_block2.cswal (+14, -10)
 32. consensus/ticker.go (+127, -0)
 33. consensus/wal.go (+59, -92)
 34. consensus/wal_test.go (+0, -78)
 35. glide.lock (+51, -35)
 36. glide.yaml (+3, -12)
 37. mempool/mempool.go (+27, -21)
 38. mempool/mempool_test.go (+13, -13)
 39. mempool/reactor.go (+2, -2)
 40. node/node.go (+29, -39)
 41. proxy/app_conn.go (+34, -30)
 42. proxy/app_conn_test.go (+17, -17)
 43. proxy/client.go (+13, -11)
 44. proxy/multi_app_conn.go (+29, -21)
 45. proxy/state.go (+0, -9)
 46. rpc/core/abci.go (+25, -0)
 47. rpc/core/mempool.go (+34, -36)
 48. rpc/core/pipe.go (+2, -2)
 49. rpc/core/routes.go (+7, -7)
 50. rpc/core/tmsp.go (+0, -17)
 51. rpc/core/types/responses.go (+22, -12)
 52. rpc/grpc/api.go (+18, -0)
 53. rpc/grpc/client_server.go (+44, -0)
 54. rpc/grpc/compile.sh (+3, -0)
 55. rpc/grpc/types.pb.go (+174, -0)
 56. rpc/grpc/types.proto (+29, -0)
 57. rpc/test/client_test.go (+28, -17)
 58. rpc/test/grpc_test.go (+24, -0)
 59. rpc/test/helpers.go (+7, -0)
 60. scripts/glide/parse.sh (+2, -0)
 61. scripts/glide/update.sh (+2, -0)
 62. scripts/install_abci_apps.sh (+13, -0)
 63. scripts/install_tmsp_apps.sh (+0, -12)
 64. scripts/txs/random.sh (+19, -0)
 65. state/errors.go (+55, -0)
 66. state/execution.go (+332, -74)
 67. state/execution_test.go (+210, -0)
 68. state/state.go (+95, -15)
 69. state/state_test.go (+42, -0)
 70. test/app/clean.sh (+1, -0)
 71. test/app/counter_test.sh (+73, -24)
 72. test/app/dummy_test.sh (+18, -12)
 73. test/app/grpc_client.go (+36, -0)
 74. test/app/test.sh (+29, -6)
 75. test/docker/Dockerfile (+4, -1)
 76. test/net/setup.sh (+26, -0)
 77. test/net/start.sh (+34, -0)
 78. test/net/test.sh (+4, -54)
 79. test/p2p/atomic_broadcast/test.sh (+7, -38)
 80. test/p2p/basic/test.sh (+53, -0)
 81. test/p2p/client.sh (+4, -3)
 82. test/p2p/data/app/init.sh (+1, -1)
 83. test/p2p/data/core/init.sh (+1, -1)
 84. test/p2p/fast_sync/check_peer.sh (+43, -0)
 85. test/p2p/fast_sync/test.sh (+8, -36)
 86. test/p2p/fast_sync/test_peer.sh (+38, -0)
 87. test/p2p/kill_all/check_peers.sh (+48, -0)
 88. test/p2p/kill_all/test.sh (+32, -0)
 89. test/p2p/local_testnet_start.sh (+3, -2)
 90. test/p2p/local_testnet_stop.sh (+12, -0)
 91. test/p2p/peer.sh (+8, -7)
 92. test/p2p/test.sh (+21, -26)
 93. test/persist/test.sh (+5, -0)
 94. test/persist/test_failure_indices.sh (+104, -0)
 95. test/persist/test_simple.sh (+70, -0)
 96. test/run_test.sh (+3, -0)
 97. test/test.sh (+6, -2)
 98. test/test_cover.sh (+1, -1)
 99. test/test_libs.sh (+4, -1)
100. types/block.go (+90, -33)

.codecov.yml (+0, -3)

@@ -14,9 +14,6 @@ coverage:
project:
default:
threshold: 1% # allow this much decrease on project
patch:
default:
threshold: 50% # allow this much decrease on patch
changes: false
comment:


.gitignore (+2, -0)

@@ -10,3 +10,5 @@ rpc/test/.tendermint
remote_dump
.revision
vendor
.vagrant
test/p2p/data/

Makefile (+5, -4)

@@ -12,19 +12,19 @@ NOVENDOR = go list github.com/tendermint/tendermint/... | grep -v /vendor/
install: get_deps
go install github.com/tendermint/tendermint/cmd/tendermint
build:
build:
go build -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
build_race:
build_race:
go build -race -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
test: build
go test `${NOVENDOR}`
test_race: build
go test -race `${NOVENDOR}`
test_integrations:
test_integrations:
bash ./test/test.sh
test100: build
@@ -48,6 +48,7 @@ get_deps:
get_vendor_deps:
go get github.com/Masterminds/glide
rm -rf vendor/
glide install
update_deps:


README.md (+1, -1)

@@ -45,7 +45,7 @@ Yay open source! Please see our [contributing guidelines](https://tendermint.com
### Sub-projects
* [TMSP](http://github.com/tendermint/tmsp)
* [ABCI](http://github.com/tendermint/abci)
* [Mintnet](http://github.com/tendermint/mintnet)
* [Go-Wire](http://github.com/tendermint/go-wire)
* [Go-P2P](http://github.com/tendermint/go-p2p)


Vagrantfile (+26, -18)

@@ -1,25 +1,33 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/trusty64"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "phusion-open-ubuntu-14.04-amd64"
config.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vbox.box"
# Or, for Ubuntu 12.04:
config.vm.provider :vmware_fusion do |f, override|
override.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vmwarefusion.box"
config.vm.provider "virtualbox" do |v|
v.memory = 2048
v.cpus = 2
end
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
# Install Docker
pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
"echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
"apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
# Add vagrant user to the docker group
pkg_cmd << "usermod -a -G docker vagrant; "
config.vm.provision :shell, :inline => pkg_cmd
end
config.vm.provision "shell", inline: <<-SHELL
apt-get update
apt-get install -y --no-install-recommends wget curl jq shellcheck bsdmainutils psmisc
wget -qO- https://get.docker.com/ | sh
usermod -a -G docker vagrant
curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
tar -xvf go1.6.linux-amd64.tar.gz
mv go /usr/local
echo 'export PATH=$PATH:/usr/local/go/bin' >> /home/vagrant/.profile
mkdir -p /home/vagrant/go/bin
chown -R vagrant:vagrant /home/vagrant/go
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.profile
mkdir -p /home/vagrant/go/src/github.com/tendermint
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
su - vagrant -c 'curl https://glide.sh/get | sh'
su - vagrant -c 'cd /vagrant/ && glide install && make test'
SHELL
end

blockchain/pool.go (+7, -7)

@@ -32,7 +32,7 @@ var peerTimeoutSeconds = time.Duration(15) // not const so we can override with
*/
type BlockPool struct {
QuitService
BaseService
startTime time.Time
mtx sync.Mutex
@@ -58,19 +58,19 @@ func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- s
requestsCh: requestsCh,
timeoutsCh: timeoutsCh,
}
bp.QuitService = *NewQuitService(log, "BlockPool", bp)
bp.BaseService = *NewBaseService(log, "BlockPool", bp)
return bp
}
func (pool *BlockPool) OnStart() error {
pool.QuitService.OnStart()
pool.BaseService.OnStart()
go pool.makeRequestersRoutine()
pool.startTime = time.Now()
return nil
}
func (pool *BlockPool) OnStop() {
pool.QuitService.OnStop()
pool.BaseService.OnStop()
}
// Run spawns requesters as needed.
@@ -383,7 +383,7 @@ func (peer *bpPeer) onTimeout() {
//-------------------------------------
type bpRequester struct {
QuitService
BaseService
pool *BlockPool
height int
gotBlockCh chan struct{}
@@ -404,12 +404,12 @@ func newBPRequester(pool *BlockPool, height int) *bpRequester {
peerID: "",
block: nil,
}
bpr.QuitService = *NewQuitService(nil, "bpRequester", bpr)
bpr.BaseService = *NewBaseService(nil, "bpRequester", bpr)
return bpr
}
func (bpr *bpRequester) OnStart() error {
bpr.QuitService.OnStart()
bpr.BaseService.OnStart()
go bpr.requestRoutine()
return nil
}
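
The hunks above migrate BlockPool and bpRequester from go-common's QuitService to BaseService: each OnStart delegates to the embedded service before spawning its own routine. As a rough illustration, here is a self-contained sketch of that embedding pattern; the types below are simplified stand-ins, not the real go-common implementation (which also carries a logger and atomic start/stop guards).

package main

import "fmt"

// Simplified stand-in for go-common's BaseService.
type BaseService struct {
	name string
	Quit chan struct{}
	impl interface {
		OnStart() error
		OnStop()
	}
}

func NewBaseService(name string, impl interface {
	OnStart() error
	OnStop()
}) *BaseService {
	return &BaseService{name: name, Quit: make(chan struct{}), impl: impl}
}

// Start/Stop dispatch to the concrete service's OnStart/OnStop hooks.
func (bs *BaseService) Start() error { return bs.impl.OnStart() }
func (bs *BaseService) Stop()        { bs.impl.OnStop(); close(bs.Quit) }

// A concrete service embeds BaseService and overrides the hooks,
// mirroring how BlockPool is wired up in the diff above.
type BlockPool struct {
	*BaseService
}

func NewBlockPool() *BlockPool {
	bp := &BlockPool{}
	bp.BaseService = NewBaseService("BlockPool", bp)
	return bp
}

func (bp *BlockPool) OnStart() error { fmt.Println("pool started"); return nil }
func (bp *BlockPool) OnStop()        { fmt.Println("pool stopped") }

func main() {
	p := NewBlockPool()
	p.Start()
	p.Stop()
}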


blockchain/reactor.go (+17, -16)

@@ -8,6 +8,7 @@ import (
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
@@ -41,7 +42,7 @@ type consensusReactor interface {
type BlockchainReactor struct {
p2p.BaseReactor
sw *p2p.Switch
config cfg.Config
state *sm.State
proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
store *BlockStore
@@ -54,7 +55,7 @@ type BlockchainReactor struct {
evsw types.EventSwitch
}
func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height -= 1 // XXX HACK, make this better
}
@@ -69,6 +70,7 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus,
timeoutsCh,
)
bcR := &BlockchainReactor{
config: config,
state: state,
proxyAppConn: proxyAppConn,
store: store,
@@ -219,33 +221,32 @@ FOR_LOOP:
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet()
firstParts := first.MakePartSet(bcR.config.GetInt("block_part_size")) // TODO: put part size in parts header?
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := bcR.state.Validators.VerifyCommit(
bcR.state.ChainID, first.Hash(), firstPartsHeader, first.Height, second.LastCommit)
bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
if err != nil {
log.Info("error in validation", "error", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
// TODO: use ApplyBlock instead of Exec/Commit/SetAppHash/Save
err := bcR.state.ExecBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader)
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: should we be firing events? need to fire NewBlock events manually ...
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
res := bcR.proxyAppConn.CommitSync()
if res.IsErr() {
// TODO Handle gracefully.
PanicQ(Fmt("Failed to commit block at application: %v", res))
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, sm.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state.AppHash = res.Data
bcR.state.Save()
}
}
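
The net effect of this hunk: the fast-sync loop no longer runs ExecBlock, the app CommitSync, the AppHash assignment, and Save as separate steps; state.ApplyBlock bundles them (with sm.MockMempool standing in where no real mempool update is needed). Below is a rough, runnable sketch of that consolidation using hypothetical stand-in types, not the real state/proxy packages.

package main

import "fmt"

// Illustrative stand-ins for the real proxy/state types.
type result struct {
	err  error
	data []byte
}

type appConn interface{ CommitSync() result }

type mockApp struct{}

func (mockApp) CommitSync() result { return result{data: []byte{0x01}} }

type state struct{ AppHash []byte }

func (s *state) execBlock(height int) error { return nil } // validate + run txs
func (s *state) save()                      {}             // persist state

// applyBlock collapses the former exec/commit/set-hash/save sequence
// into one call, as the new bcR.state.ApplyBlock does above.
func (s *state) applyBlock(app appConn, height int) error {
	if err := s.execBlock(height); err != nil {
		return fmt.Errorf("exec block %d: %v", height, err)
	}
	res := app.CommitSync()
	if res.err != nil {
		return fmt.Errorf("commit block %d: %v", height, res.err)
	}
	s.AppHash = res.data // the app hash the next block must carry
	s.save()
	return nil
}

func main() {
	s := &state{}
	fmt.Println(s.applyBlock(mockApp{}, 1), s.AppHash)
}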


blockchain/store.go (+17, -6)

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"sync"
. "github.com/tendermint/go-common"
dbm "github.com/tendermint/go-db"
@@ -27,8 +28,10 @@ the Commit data outside the Block.
Panics indicate probable corruption in the data
*/
type BlockStore struct {
db dbm.DB
mtx sync.RWMutex
height int
db dbm.DB
}
func NewBlockStore(db dbm.DB) *BlockStore {
@@ -41,6 +44,8 @@ func NewBlockStore(db dbm.DB) *BlockStore {
// Height() returns the last known contiguous block height.
func (bs *BlockStore) Height() int {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
return bs.height
}
@@ -141,8 +146,8 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
height := block.Height
if height != bs.height+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
if !blockParts.IsComplete() {
PanicSanity(Fmt("BlockStore can only save complete block part sets"))
@@ -163,6 +168,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
// Save seen commit (seen +2/3 precommits for block)
// NOTE: we can delete this at a later height
seenCommitBytes := wire.BinaryBytes(seenCommit)
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
@@ -170,12 +176,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
BlockStoreStateJSON{Height: height}.Save(bs.db)
// Done!
bs.mtx.Lock()
bs.height = height
bs.mtx.Unlock()
// Flush
bs.db.SetSync(nil, nil)
}
func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
if height != bs.height+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := wire.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -212,7 +223,7 @@ func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
if err != nil {
PanicSanity(Fmt("Could not marshal state bytes: %v", err))
}
db.Set(blockStoreKey, bytes)
db.SetSync(blockStoreKey, bytes)
}
func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
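
The height field is now guarded by a sync.RWMutex so concurrent readers don't race the single writer in SaveBlock, and the store-state JSON is flushed with SetSync. A small sketch of just that locking pattern (names are illustrative, not the real BlockStore):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mtx    sync.RWMutex
	height int
}

// Readers take the shared lock, as the new Height() does.
func (s *store) Height() int {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.height
}

func (s *store) saveBlock(h int) error {
	// Validate against the locked getter, as SaveBlock now does.
	if h != s.Height()+1 {
		return fmt.Errorf("non-contiguous block: want %d, got %d", s.Height()+1, h)
	}
	// ... persist block parts and commits here ...
	s.mtx.Lock()
	s.height = h // the single writer updates under the exclusive lock
	s.mtx.Unlock()
	return nil
}

func main() {
	s := &store{}
	fmt.Println(s.saveBlock(1), s.Height()) // <nil> 1
}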


circle.yml (+3, -1)

@@ -29,7 +29,9 @@ dependencies:
test:
override:
- "cd $REPO && make test_integrations"
- "cd $REPO && set -o pipefail && make test_integrations | tee ~/test_integrations.log":
timeout: 1800
- "cp ~/test_integrations.log $CIRCLE_ARTIFACTS"
post:
- "cd $REPO && bash <(curl -s https://codecov.io/bash)"


cmd/tendermint/flags.go (+6, -3)

@@ -16,9 +16,10 @@ func parseFlags(config cfg.Config, args []string) {
fastSync bool
skipUPNP bool
rpcLaddr string
grpcLaddr string
logLevel string
proxyApp string
tmspTransport string
abciTransport string
pex bool
)
@@ -34,10 +35,11 @@ func parseFlags(config cfg.Config, args []string) {
flags.BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"), "Fast blockchain syncing")
flags.BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"), "Skip UPNP configuration")
flags.StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"), "RPC listen address. Port required")
flags.StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"), "GRPC listen address (BroadcastTx only). Port required")
flags.StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
flags.StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
flags.StringVar(&tmspTransport, "tmsp", config.GetString("tmsp"), "Specify tmsp transport (socket | grpc)")
flags.StringVar(&abciTransport, "abci", config.GetString("abci"), "Specify abci transport (socket | grpc)")
// feature flags
flags.BoolVar(&pex, "pex", config.GetBool("pex_reactor"), "Enable Peer-Exchange (dev feature)")
@@ -55,9 +57,10 @@ func parseFlags(config cfg.Config, args []string) {
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("grpc_laddr", grpcLaddr)
config.Set("log_level", logLevel)
config.Set("proxy_app", proxyApp)
config.Set("tmsp", tmspTransport)
config.Set("abci", abciTransport)
config.Set("pex_reactor", pex)
}
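
The pattern in parseFlags: each flag's default comes from the config, and the parsed value is written back, so command-line flags override config-file values. A toy version of that round-trip, with a plain map standing in for go-config's cfg.Config:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Stand-in for cfg.Config: defaults as they'd come from config.toml.
	config := map[string]string{"abci": "socket", "grpc_laddr": ""}

	fs := flag.NewFlagSet("tendermint", flag.ExitOnError)
	abci := fs.String("abci", config["abci"], "Specify abci transport (socket | grpc)")
	grpc := fs.String("grpc_laddr", config["grpc_laddr"], "GRPC listen address (BroadcastTx only). Port required")
	fs.Parse(os.Args[1:])

	// Write the parsed values back, so flags override the file defaults.
	config["abci"] = *abci
	config["grpc_laddr"] = *grpc
	fmt.Println(config)
}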

cmd/tendermint/main.go (+3, -3)

@@ -41,10 +41,10 @@ Commands:
case "node":
node.RunNode(config)
case "replay":
if len(args) > 1 && args[1] == "console" {
node.RunReplayConsole(config)
if len(args) > 2 && args[1] == "console" {
node.RunReplayConsole(config, args[2])
} else {
node.RunReplay(config)
node.RunReplay(config, args[1])
}
case "init":
init_files()


cmd/tendermint/reset_priv_validator.go (+1, -1)

@@ -11,7 +11,7 @@ import (
func reset_all() {
reset_priv_validator()
os.RemoveAll(config.GetString("db_dir"))
os.Remove(config.GetString("cswal"))
os.RemoveAll(config.GetString("cs_wal_dir"))
}
// NOTE: this is totally unsafe.


config/tendermint/config.go (+10, -5)

@@ -22,6 +22,7 @@ func getTMRoot(rootDir string) string {
func initTMRoot(rootDir string) {
rootDir = getTMRoot(rootDir)
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
@@ -53,7 +54,7 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetRequired("chain_id") // blows up if you try to use it before setting.
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:46658")
mapConfig.SetDefault("tmsp", "socket")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:46656")
mapConfig.SetDefault("seeds", "")
@@ -68,13 +69,15 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:46657")
mapConfig.SetDefault("grpc_laddr", "")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
mapConfig.SetDefault("cswal_light", false)
mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000)
mapConfig.SetDefault("block_size", 10000) // max number of txs
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
mapConfig.SetDefault("timeout_propose", 3000)
mapConfig.SetDefault("timeout_propose_delta", 500)
@@ -83,10 +86,12 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetDefault("timeout_precommit", 1000)
mapConfig.SetDefault("timeout_precommit_delta", 500)
mapConfig.SetDefault("timeout_commit", 1000)
// make progress asap (no `timeout_commit`) on full precommit votes
mapConfig.SetDefault("skip_timeout_commit", false)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal", rootDir+"/data/mempool_wal")
mapConfig.SetDefault("mempool_wal_dir", rootDir+"/data/mempool.wal")
return mapConfig
}


config/tendermint_test/config.go (+19, -12)

@@ -9,6 +9,7 @@ import (
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-logger"
)
func init() {
@@ -33,6 +34,7 @@ func initTMRoot(rootDir string) {
}
// Create new dir
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
genesisFilePath := path.Join(rootDir, "genesis.json")
@@ -68,7 +70,7 @@ func ResetConfig(localPath string) cfg.Config {
mapConfig.SetDefault("chain_id", "tendermint_test")
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "dummy")
mapConfig.SetDefault("tmsp", "socket")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:36656")
mapConfig.SetDefault("fast_sync", false)
@@ -79,27 +81,32 @@ func ResetConfig(localPath string) cfg.Config {
mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
mapConfig.SetDefault("db_backend", "memdb")
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "debug")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:36657")
mapConfig.SetDefault("grpc_laddr", "tcp://0.0.0.0:36658")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
mapConfig.SetDefault("cswal_light", false)
mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000)
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
mapConfig.SetDefault("timeout_propose", 2000)
mapConfig.SetDefault("timeout_propose_delta", 500)
mapConfig.SetDefault("timeout_prevote", 1000)
mapConfig.SetDefault("timeout_prevote_delta", 500)
mapConfig.SetDefault("timeout_precommit", 1000)
mapConfig.SetDefault("timeout_precommit_delta", 500)
mapConfig.SetDefault("timeout_commit", 100)
mapConfig.SetDefault("timeout_propose_delta", 1)
mapConfig.SetDefault("timeout_prevote", 10)
mapConfig.SetDefault("timeout_prevote_delta", 1)
mapConfig.SetDefault("timeout_precommit", 10)
mapConfig.SetDefault("timeout_precommit_delta", 1)
mapConfig.SetDefault("timeout_commit", 10)
mapConfig.SetDefault("skip_timeout_commit", true)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal", "")
mapConfig.SetDefault("mempool_wal_dir", "")
logger.SetLogLevel(mapConfig.GetString("log_level"))
return mapConfig
}
@@ -113,7 +120,7 @@ node_laddr = "tcp://0.0.0.0:36656"
seeds = ""
fast_sync = false
db_backend = "memdb"
log_level = "debug"
log_level = "info"
rpc_laddr = "tcp://0.0.0.0:36657"
`


consensus/byzantine_test.go (+297, -0)

@@ -0,0 +1,297 @@
package consensus
import (
"sync"
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
)
func init() {
config = tendermint_test.ResetConfig("consensus_byzantine_test")
}
//----------------------------------------------
// byzantine failures
// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
// byzantine validator sends conflicting proposals into A and B,
// and prevotes/precommits on both of them.
// B sees a commit, A doesn't.
// Byzantine validator refuses to prevote.
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
// give the byzantine validator a normal ticker
css[0].SetTimeoutTicker(NewTimeoutTicker())
switches := make([]*p2p.Switch, N)
for i := 0; i < N; i++ {
switches[i] = p2p.NewSwitch(cfg.NewMapConfig(nil))
}
reactors := make([]p2p.Reactor, N)
defer func() {
for _, r := range reactors {
if rr, ok := r.(*ByzantineReactor); ok {
rr.reactor.Switch.Stop()
} else {
r.(*ConsensusReactor).Switch.Stop()
}
}
}()
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
// make byzantine
css[i].decideProposal = func(j int) func(int, int) {
return func(height, round int) {
byzantineDecideProposalFunc(height, round, css[j], switches[j])
}
}(i)
css[i].doPrevote = func(height, round int) {}
}
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR.SetEventSwitch(eventSwitch)
var conRI p2p.Reactor
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}
reactors[i] = conRI
}
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
// ignore new switch s, we already made ours
switches[i].AddReactor("CONSENSUS", reactors[i])
return switches[i]
}, func(sws []*p2p.Switch, i, j int) {
// the network starts partitioned with globally active adversary
if i != 0 {
return
}
p2p.Connect2Switches(sws, i, j)
})
// start the state machines
byzR := reactors[0].(*ByzantineReactor)
s := byzR.reactor.conS.GetState()
byzR.reactor.SwitchToConsensus(s)
for i := 1; i < N; i++ {
cr := reactors[i].(*ConsensusReactor)
cr.SwitchToConsensus(cr.conS.GetState())
}
// byz proposer sends one block to peers[0]
// and the other block to peers[1] and peers[2].
// note peers and switches order don't match.
peers := switches[0].Peers().List()
ind0 := getSwitchIndex(switches, peers[0])
ind1 := getSwitchIndex(switches, peers[1])
ind2 := getSwitchIndex(switches, peers[2])
// connect the 2 peers in the larger partition
p2p.Connect2Switches(switches, ind1, ind2)
// wait for someone in the big partition to make a block
select {
case <-eventChans[ind2]:
}
log.Notice("A block has been committed. Healing partition")
// connect the partitions
p2p.Connect2Switches(switches, ind0, ind1)
p2p.Connect2Switches(switches, ind0, ind2)
// wait till everyone makes the first new block
// (one of them already has)
wg := new(sync.WaitGroup)
wg.Add(2)
for i := 1; i < N-1; i++ {
go func(j int) {
<-eventChans[j]
wg.Done()
}(i)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
tick := time.NewTicker(time.Second * 10)
select {
case <-done:
case <-tick.C:
for i, reactor := range reactors {
t.Log(Fmt("Consensus Reactor %v", i))
t.Log(Fmt("%v", reactor))
}
t.Fatalf("Timed out waiting for all validators to commit first block")
}
}
//-------------------------------
// byzantine consensus functions
func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.Switch) {
// byzantine user should create two proposals and try to split the vote.
// Avoid sending on internalMsgQueue and running consensus state.
// Create a new proposal block from state/txs from the mempool.
block1, blockParts1 := cs.createProposalBlock()
polRound, polBlockID := cs.Votes.POLInfo()
proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesnt err
// Create a new proposal block from state/txs from the mempool.
block2, blockParts2 := cs.createProposalBlock()
polRound, polBlockID = cs.Votes.POLInfo()
proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesnt err
block1Hash := block1.Hash()
block2Hash := block2.Hash()
// broadcast conflicting proposals/block parts to peers
peers := sw.Peers().List()
log.Notice("Byzantine: broadcasting conflicting proposals", "peers", len(peers))
for i, peer := range peers {
if i < len(peers)/2 {
go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
} else {
go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
}
}
}
func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
// parts
for i := 0; i < parts.Total(); i++ {
part := parts.GetPart(i)
msg := &BlockPartMessage{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
}
// votes
cs.mtx.Lock()
prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header())
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
cs.mtx.Unlock()
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}})
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}})
}
//----------------------------------------
// byzantine consensus reactor
type ByzantineReactor struct {
Service
reactor *ConsensusReactor
}
func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
return &ByzantineReactor{
Service: conR,
reactor: conR,
}
}
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
if !br.reactor.IsRunning() {
return
}
// Create peerState for peer
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !br.reactor.fastSync {
br.reactor.sendNewRoundStepMessage(peer)
}
}
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
//----------------------------------------
// byzantine privValidator
type ByzantinePrivValidator struct {
Address []byte `json:"address"`
types.Signer `json:"-"`
mtx sync.Mutex
}
// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator {
return &ByzantinePrivValidator{
Address: pv.Address,
Signer: pv.Signer,
}
}
func (privVal *ByzantinePrivValidator) GetAddress() []byte {
return privVal.Address
}
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
vote.Signature = privVal.Sign(types.SignBytes(chainID, vote))
return nil
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal))
return nil
}
func (privVal *ByzantinePrivValidator) String() string {
return Fmt("PrivValidator{%X}", privVal.Address)
}

consensus/common.go (+16, -2)

@@ -4,12 +4,26 @@ import (
"github.com/tendermint/tendermint/types"
)
// NOTE: this is blocking
// XXX: WARNING: these functions can halt the consensus as firing events is synchronous.
// Make sure to read off the channels, and in the case of subscribeToEventRespond, to write back on it
// NOTE: if chanCap=0, this blocks on the event being consumed
func subscribeToEvent(evsw types.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
// listen for new round
// listen for event
ch := make(chan interface{}, chanCap)
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
})
return ch
}
// NOTE: this blocks on receiving a response after the event is consumed
func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) chan interface{} {
// listen for event
ch := make(chan interface{})
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
<-ch
})
return ch
}
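
The warning above is the key contract: the event switch fires listeners synchronously, so a listener that blocks on its channel stalls consensus, and subscribeToEventRespond additionally blocks until the consumer acks. A toy model of that respond-channel handshake (no go-events dependency; names are illustrative):

package main

import "fmt"

// Toy version of the subscribeToEventRespond contract: the listener
// delivers an event, then blocks until the consumer writes back on the
// same channel.
func subscribeRespond(register func(listener func(string))) chan interface{} {
	ch := make(chan interface{})
	register(func(data string) {
		ch <- data // deliver the event ...
		<-ch       // ... and wait for the consumer's ack
	})
	return ch
}

func main() {
	var fire func(string)
	ch := subscribeRespond(func(l func(string)) { fire = l })

	go fire("NewRound") // the event switch fires synchronously in its goroutine

	ev := <-ch // consume the event
	fmt.Println("got", ev)
	ch <- struct{}{} // ack; without this, the event switch stays blocked
}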

consensus/common_test.go (+249, -231)

@@ -3,52 +3,72 @@ package consensus
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"sync"
"testing"
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/config/tendermint_test"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmspcli "github.com/tendermint/tmsp/client"
tmsp "github.com/tendermint/tmsp/types"
abcicli "github.com/tendermint/abci/client"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tmsp/example/counter"
"github.com/tendermint/tmsp/example/dummy"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
)
var config cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Duration(2)
func ensureDir(dir string, mode os.FileMode) {
if err := EnsureDir(dir, mode); err != nil {
panic(err)
}
}
//-------------------------------------------------------------------------------
// validator stub (a dummy consensus peer we control)
type validatorStub struct {
Index int // Validator index. NOTE: we don't assume validator set changes.
Height int
Round int
*types.PrivValidator
}
func NewValidatorStub(privValidator *types.PrivValidator) *validatorStub {
var testMinPower = 10
func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validatorStub {
return &validatorStub{
Index: valIndex,
PrivValidator: privValidator,
}
}
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.Address,
Height: vs.Height,
Round: vs.Round,
Type: voteType,
BlockHash: hash,
BlockPartsHeader: header,
BlockID: types.BlockID{hash, header},
}
err := vs.PrivValidator.SignVote(config.GetString("chain_id"), vote)
return vote, err
}
// convenienve function for testing
// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(voteType, hash, header)
if err != nil {
@@ -57,102 +77,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe
return v
}
// create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, cs2 *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
}
// Make proposal
proposal = types.NewProposal(height, round, blockParts.Header(), cs1.Votes.POLRound())
if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic(err)
}
return
}
//-------------------------------------------------------------------------------
// utils
/*
func nilRound(t *testing.T, cs1 *ConsensusState, vss ...*validatorStub) {
cs1.mtx.Lock()
height, round := cs1.Height, cs1.Round
cs1.mtx.Unlock()
waitFor(t, cs1, height, round, RoundStepPrevote)
signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)
waitFor(t, cs1, height, round, RoundStepPrecommit)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)
waitFor(t, cs1, height, round+1, RoundStepNewRound)
}
*/
// NOTE: this switches the propser as far as `perspectiveOf` is concerned,
// but for simplicity we return a block it generated.
func changeProposer(t *testing.T, perspectiveOf *ConsensusState, newProposer *validatorStub) *types.Block {
_, v1 := perspectiveOf.Validators.GetByAddress(perspectiveOf.privValidator.Address)
v1.Accum, v1.VotingPower = 0, 0
if updated := perspectiveOf.Validators.Update(v1); !updated {
panic("failed to update validator")
}
_, v2 := perspectiveOf.Validators.GetByAddress(newProposer.Address)
v2.Accum, v2.VotingPower = 100, 100
if updated := perspectiveOf.Validators.Update(v2); !updated {
panic("failed to update validator")
}
// make the proposal
propBlock, _ := perspectiveOf.createProposalBlock()
if propBlock == nil {
panic("Failed to create proposal block with cs2")
}
return propBlock
}
func fixVotingPower(t *testing.T, cs1 *ConsensusState, addr2 []byte) {
_, v1 := cs1.Validators.GetByAddress(cs1.privValidator.Address)
_, v2 := cs1.Validators.GetByAddress(addr2)
v1.Accum, v1.VotingPower = v2.Accum, v2.VotingPower
if updated := cs1.Validators.Update(v1); !updated {
panic("failed to update validator")
}
}
func addVoteToFromMany(to *ConsensusState, votes []*types.Vote, froms ...*validatorStub) {
if len(votes) != len(froms) {
panic("len(votes) and len(froms) must match")
}
for i, from := range froms {
addVoteToFrom(to, from, votes[i])
}
}
func addVoteToFrom(to *ConsensusState, from *validatorStub, vote *types.Vote) {
to.mtx.Lock() // NOTE: wont need this when the vote comes with the index!
valIndex, _ := to.Validators.GetByAddress(from.PrivValidator.Address)
to.mtx.Unlock()
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{valIndex, vote}}
// added, err := to.TryAddVote(valIndex, vote, "")
/*
if _, ok := err.(*types.ErrVoteConflictingSignature); ok {
// let it fly
} else if !added {
fmt.Println("to, from, vote:", to.Height, from.Height, vote.Height)
panic(fmt.Sprintln("Failed to add vote. Err:", err))
} else if err != nil {
panic(fmt.Sprintln("Failed to add vote:", err))
}*/
}
func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -160,89 +85,51 @@ func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ..
return votes
}
// add vote to one cs from another
// if voteCh is not nil, read all votes
func signAddVoteToFromMany(voteType byte, to *ConsensusState, hash []byte, header types.PartSetHeader, voteCh chan interface{}, froms ...*validatorStub) {
var wg chan struct{} // when done reading all votes
if voteCh != nil {
wg = readVotes(voteCh, len(froms))
}
for _, from := range froms {
vote := signVote(from, voteType, hash, header)
addVoteToFrom(to, from, vote)
}
if voteCh != nil {
<-wg
func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss {
vs.Height += 1
}
}
func signAddVoteToFrom(voteType byte, to *ConsensusState, from *validatorStub, hash []byte, header types.PartSetHeader, voteCh chan interface{}) *types.Vote {
var wg chan struct{} // when done reading all votes
if voteCh != nil {
wg = readVotes(voteCh, 1)
}
vote := signVote(from, voteType, hash, header)
addVoteToFrom(to, from, vote)
if voteCh != nil {
<-wg
func incrementRound(vss ...*validatorStub) {
for _, vs := range vss {
vs.Round += 1
}
return vote
}
func ensureNoNewStep(stepCh chan interface{}) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-stepCh:
panic("We should be stuck waiting for more votes, not moving to the next step")
}
}
//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
/*
func ensureNoNewStep(t *testing.T, cs *ConsensusState) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-cs.NewStepCh():
panic("We should be stuck waiting for more votes, not moving to the next step")
}
func startTestRound(cs *ConsensusState, height, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
}
func ensureNewStep(t *testing.T, cs *ConsensusState) *RoundState {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
panic("We should have gone to the next step, not be stuck waiting")
case rs := <-cs.NewStepCh():
return rs
// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
}
}
func waitFor(t *testing.T, cs *ConsensusState, height int, round int, step RoundStepType) {
for {
rs := ensureNewStep(t, cs)
if CompareHRS(rs.Height, rs.Round, rs.Step, height, round, step) < 0 {
continue
} else {
break
}
// Make proposal
polRound, polBlockID := cs1.Votes.POLInfo()
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
if err := vs.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic(err)
}
return
}
*/
func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss {
vs.Height += 1
func addVotes(to *ConsensusState, votes ...*types.Vote) {
for _, vote := range votes {
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
}
}
func incrementRound(vss ...*validatorStub) {
for _, vs := range vss {
vs.Round += 1
}
func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
@@ -252,12 +139,12 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
panic("Failed to find prevote from validator")
}
if blockHash == nil {
if vote.BlockHash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockHash))
if vote.BlockID.Hash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
}
} else {
if !bytes.Equal(vote.BlockHash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockHash))
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
}
@@ -268,8 +155,8 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
if vote = votes.GetByAddress(privVal.Address); vote == nil {
panic("Failed to find precommit from validator")
}
if !bytes.Equal(vote.BlockHash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockHash))
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
@@ -281,11 +168,11 @@
}
if votedBlockHash == nil {
if vote.BlockHash != nil {
if vote.BlockID.Hash != nil {
panic("Expected precommit to be for nil")
}
} else {
if !bytes.Equal(vote.BlockHash, votedBlockHash) {
if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
panic("Expected precommit to be for proposal block")
}
}
@@ -311,41 +198,56 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
cs.mtx.Unlock()
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
privValidator.Reset()
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
return cs
// genesis
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh := make(chan interface{})
go func() {
for {
v := <-voteCh0
vote := v.(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
}
}
}()
return voteCh
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
privValidator.Reset()
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
return cs
func readVotes(ch chan interface{}, reads int) chan struct{} {
wg := make(chan struct{})
go func() {
for i := 0; i < reads; i++ {
<-ch // read the precommit event
}
close(wg)
}()
return wg
}
func newConsensusState(state *sm.State, pv *types.PrivValidator, app tmsp.Application) *ConsensusState {
//-------------------------------------------------------------------------------
// consensus states
func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := tmspcli.NewLocalClient(mtx, app)
proxyAppConnCon := tmspcli.NewLocalClient(mtx, app)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(config, proxyAppConnMem)
mempool := mempl.NewMempool(thisConfig, proxyAppConnMem)
// Make ConsensusReactor
cs := NewConsensusState(config, state, proxyAppConnCon, blockStore, mempool)
cs := NewConsensusState(thisConfig, state, proxyAppConnCon, blockStore, mempool)
cs.SetPrivValidator(pv)
evsw := types.NewEventSwitch()
@@ -354,6 +256,30 @@ func newConsensusState(state *sm.State, pv *types.PrivValidator, app tmsp.Applic
return cs
}
func loadPrivValidator(conf cfg.Config) *types.PrivValidator {
privValidatorFile := conf.GetString("priv_validator_file")
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
privValidator.Reset()
return privValidator
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
return cs
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
return cs
}
func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
@@ -363,7 +289,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i])
vss[i] = NewValidatorStub(privVals[i], i)
}
// since cs1 starts at 1
incrementHeight(vss[1:]...)
@@ -371,41 +297,74 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
return cs, vss
}
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh := make(chan interface{})
go func() {
for {
v := <-voteCh0
vote := v.(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Address) {
voteCh <- v
}
}
}()
return voteCh
//-------------------------------------------------------------------------------
func ensureNoNewStep(stepCh chan interface{}) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-stepCh:
panic("We should be stuck waiting for more votes, not moving to the next step")
}
}
func readVotes(ch chan interface{}, reads int) chan struct{} {
wg := make(chan struct{})
go func() {
for i := 0; i < reads; i++ {
<-ch // read the precommit event
//-------------------------------------------------------------------------------
// consensus nets
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
css := make([]*ConsensusState, nPeers)
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
privVal = types.GenPrivValidator()
_, tempFilePath := Tempfile("priv_validator_")
privVal.SetFile(tempFilePath)
}
close(wg)
}()
return wg
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
db := dbm.NewMemDB()
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0 := sm.MakeGenesisState(db, genDoc)
s0.Save()
return s0, privValidators
func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
for i, s := range switches {
if bytes.Equal(peer.NodeInfo.PubKey.Address(), s.NodeInfo().PubKey.Address()) {
return i
}
}
panic("didnt find peer in switches")
return -1
}
//-------------------------------------------------------------------------------
// genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidator, numValidators)
@@ -423,10 +382,69 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
ChainID: config.GetString("chain_id"),
Validators: validators,
}, privValidators
}
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
db := dbm.NewMemDB()
s0 := sm.MakeGenesisState(db, genDoc)
s0.Save()
return s0, privValidators
}
func startTestRound(cs *ConsensusState, height, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
//------------------------------------
// mock ticker
func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
return func() TimeoutTicker {
return &mockTicker{
c: make(chan timeoutInfo, 10),
onlyOnce: onlyOnce,
}
}
}
// mock ticker only fires on RoundStepNewHeight
// and only once if onlyOnce=true
type mockTicker struct {
c chan timeoutInfo
mtx sync.Mutex
onlyOnce bool
fired bool
}
func (m *mockTicker) Start() (bool, error) {
return true, nil
}
func (m *mockTicker) Stop() bool {
return true
}
func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.onlyOnce && m.fired {
return
}
if ti.Step == RoundStepNewHeight {
m.c <- ti
m.fired = true
}
}
func (m *mockTicker) Chan() <-chan timeoutInfo {
return m.c
}
//------------------------------------
func newCounter() abci.Application {
return counter.NewCounterApplication(true)
}
func newPersistentDummy() abci.Application {
dir, _ := ioutil.TempDir("/tmp", "persistent-dummy")
return dummy.NewPersistentDummyApplication(dir)
}

consensus/height_vote_set.go (+35, -11)

@@ -24,6 +24,8 @@ but which round is not known in advance, so when a peer
provides a precommit for a round greater than mtx.round,
we create a new entry in roundVoteSets but also remember the
peer to prevent abuse.
We let each peer provide us with up to 2 unexpected "catchup" rounds.
One for their LastCommit round, and another for the official commit round.
*/
type HeightVoteSet struct {
chainID string
@@ -33,7 +35,7 @@ type HeightVoteSet struct {
mtx sync.Mutex
round int // max tracked round
roundVoteSets map[int]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[string]int // keys: peer.Key; values: round
peerCatchupRounds map[string][]int // keys: peer.Key; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -51,7 +53,7 @@ func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) {
hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int]RoundVoteSet)
hvs.peerCatchupRounds = make(map[string]int)
hvs.peerCatchupRounds = make(map[string][]int)
hvs.addRound(0)
hvs.round = 0
@@ -100,15 +102,18 @@ func (hvs *HeightVoteSet) addRound(round int) {
// Duplicate votes return added=false, err=nil.
// By convention, peerKey is "" if origin is self.
func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
if voteSet == nil {
if _, ok := hvs.peerCatchupRounds[peerKey]; !ok {
if rndz := hvs.peerCatchupRounds[peerKey]; len(rndz) < 2 {
hvs.addRound(vote.Round)
voteSet = hvs.getVoteSet(vote.Round, vote.Type)
hvs.peerCatchupRounds[peerKey] = vote.Round
hvs.peerCatchupRounds[peerKey] = append(rndz, vote.Round)
} else {
// Peer has sent a vote that does not match our round,
// for more than one round. Bad peer!
@@ -117,7 +122,7 @@ func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey str
return
}
}
added, address, err = voteSet.AddByIndex(valIndex, vote)
added, err = voteSet.AddVote(vote)
return
}
@@ -133,17 +138,19 @@ func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet {
return hvs.getVoteSet(round, types.VoteTypePrecommit)
}
// Last round that has +2/3 prevotes for a particular block or nil.
// Last round and blockID that has +2/3 prevotes for a particular block or nil.
// Returns -1 if no such round exists.
func (hvs *HeightVoteSet) POLRound() int {
func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
for r := hvs.round; r >= 0; r-- {
if hvs.getVoteSet(r, types.VoteTypePrevote).HasTwoThirdsMajority() {
return r
rvs := hvs.getVoteSet(r, types.VoteTypePrevote)
polBlockID, ok := rvs.TwoThirdsMajority()
if ok {
return r, polBlockID
}
}
return -1
return -1, types.BlockID{}
}
func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
@@ -194,3 +201,20 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
indent, strings.Join(vsStrings, "\n"+indent+" "),
indent)
}
// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID string, blockID types.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
return
}
voteSet := hvs.getVoteSet(round, type_)
if voteSet == nil {
return
}
voteSet.SetPeerMaj23(peerID, blockID)
}
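
The new rule above caps each peer at two unexpected "catchup" rounds: one for its LastCommit round and one for the official commit round. A compact model of just that accounting (illustrative only, not the real HeightVoteSet):

package main

import "fmt"

type hvs struct {
	rounds            map[int]bool     // rounds we already track
	peerCatchupRounds map[string][]int // per peer: at most 2 unexpected rounds
}

func (h *hvs) allowRound(peer string, round int) bool {
	if h.rounds[round] {
		return true // round already tracked; always accepted
	}
	rndz := h.peerCatchupRounds[peer]
	if len(rndz) >= 2 {
		return false // bad peer: too many unexpected rounds
	}
	h.rounds[round] = true
	h.peerCatchupRounds[peer] = append(rndz, round)
	return true
}

func main() {
	h := &hvs{rounds: map[int]bool{0: true}, peerCatchupRounds: map[string][]int{}}
	fmt.Println(h.allowRound("peer1", 999))  // true  (1st catchup round)
	fmt.Println(h.allowRound("peer1", 1000)) // true  (2nd catchup round)
	fmt.Println(h.allowRound("peer1", 1001)) // false (limit reached)
	fmt.Println(h.allowRound("peer2", 1001)) // true  (different peer)
}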

consensus/height_vote_set_test.go (+19, -10)

@@ -17,31 +17,40 @@ func TestPeerCatchupRounds(t *testing.T) {
hvs := NewHeightVoteSet(config.GetString("chain_id"), 1, valSet)
vote999_0 := makeVoteHR(t, 1, 999, privVals[0])
added, _, err := hvs.AddByIndex(0, vote999_0, "peer1")
vote999_0 := makeVoteHR(t, 1, 999, privVals, 0)
added, err := hvs.AddVote(vote999_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1000_0 := makeVoteHR(t, 1, 1000, privVals[0])
added, _, err = hvs.AddByIndex(0, vote1000_0, "peer1")
vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0)
added, err = hvs.AddVote(vote1000_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
added, err = hvs.AddVote(vote1001_0, "peer1")
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}
added, _, err = hvs.AddByIndex(0, vote1000_0, "peer2")
added, err = hvs.AddVote(vote1001_0, "peer2")
if !added || err != nil {
t.Error("Expected to successfully add vote from another peer")
}
}
func makeVoteHR(t *testing.T, height, round int, privVal *types.PrivValidator) *types.Vote {
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockHash: []byte("fakehash"),
ValidatorAddress: privVal.Address,
ValidatorIndex: valIndex,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
}
chainID := config.GetString("chain_id")
err := privVal.SignVote(chainID, vote)


+20 -20 consensus/mempool_test.go

@ -7,7 +7,7 @@ import (
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
)
@ -23,8 +23,8 @@ func TestTxConcurrentWithCommit(t *testing.T) {
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
appendTxsRange := func(start, end int) {
// Append some txs.
deliverTxsRange := func(start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
@ -37,7 +37,7 @@ func TestTxConcurrentWithCommit(t *testing.T) {
}
NTxs := 10000
go appendTxsRange(0, NTxs)
go deliverTxsRange(0, NTxs)
startTestRound(cs, height, round)
ticker := time.NewTicker(time.Second * 20)
@ -59,17 +59,17 @@ func TestRmBadTx(t *testing.T) {
// increment the counter by 1
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
app.AppendTx(txBytes)
app.DeliverTx(txBytes)
app.Commit()
ch := make(chan struct{})
cbCh := make(chan struct{})
go func() {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad tmsp code
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := cs.mempool.CheckTx(txBytes, func(r *tmsp.Response) {
if r.GetCheckTx().Code != tmsp.CodeType_BadNonce {
err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != abci.CodeType_BadNonce {
t.Fatalf("expected checktx to return bad nonce, got %v", r)
}
cbCh <- struct{}{}
@ -122,45 +122,45 @@ func NewCounterApplication() *CounterApplication {
return &CounterApplication{}
}
func (app *CounterApplication) Info() string {
return Fmt("txs:%v", app.txCount)
func (app *CounterApplication) Info() abci.ResponseInfo {
return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
}
func (app *CounterApplication) SetOption(key string, value string) (log string) {
return ""
}
func (app *CounterApplication) AppendTx(tx []byte) tmsp.Result {
func (app *CounterApplication) DeliverTx(tx []byte) abci.Result {
return runTx(tx, &app.txCount)
}
func (app *CounterApplication) CheckTx(tx []byte) tmsp.Result {
func (app *CounterApplication) CheckTx(tx []byte) abci.Result {
return runTx(tx, &app.mempoolTxCount)
}
func runTx(tx []byte, countPtr *int) tmsp.Result {
func runTx(tx []byte, countPtr *int) abci.Result {
count := *countPtr
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(count) {
return tmsp.ErrBadNonce.AppendLog(Fmt("Invalid nonce. Expected %v, got %v", count, txValue))
return abci.ErrBadNonce.AppendLog(Fmt("Invalid nonce. Expected %v, got %v", count, txValue))
}
*countPtr += 1
return tmsp.OK
return abci.OK
}
func (app *CounterApplication) Commit() tmsp.Result {
func (app *CounterApplication) Commit() abci.Result {
app.mempoolTxCount = app.txCount
if app.txCount == 0 {
return tmsp.OK
return abci.OK
} else {
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return tmsp.NewResultOK(hash, "")
return abci.NewResultOK(hash, "")
}
}
func (app *CounterApplication) Query(query []byte) tmsp.Result {
return tmsp.NewResultOK(nil, Fmt("Query is not supported"))
func (app *CounterApplication) Query(query []byte) abci.Result {
return abci.NewResultOK(nil, Fmt("Query is not supported"))
}
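The counter app above treats each tx as an 8-byte big-endian nonce that must equal the current count. A standalone sketch of that encode/check round-trip, mirroring deliverTxsRange and runTx:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeNonce mirrors how the test serializes txs: an 8-byte
// big-endian counter value.
func encodeNonce(n uint64) []byte {
	tx := make([]byte, 8)
	binary.BigEndian.PutUint64(tx, n)
	return tx
}

// checkNonce mirrors runTx's left-padding: short txs are right-aligned
// into 8 bytes before decoding.
func checkNonce(tx []byte, count uint64) bool {
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	return binary.BigEndian.Uint64(tx8) == count
}

func main() {
	fmt.Println(checkNonce(encodeNonce(5), 5)) // true
	fmt.Println(checkNonce(encodeNonce(5), 6)) // false: bad nonce
}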

+330 -100 consensus/reactor.go

@ -11,36 +11,35 @@ import (
. "github.com/tendermint/go-common"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
const (
StateChannel = byte(0x20)
DataChannel = byte(0x21)
VoteChannel = byte(0x22)
peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
maxConsensusMessageSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
StateChannel = byte(0x20)
DataChannel = byte(0x21)
VoteChannel = byte(0x22)
VoteSetBitsChannel = byte(0x23)
peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
peerQueryMaj23SleepDuration = 2 * time.Second // Time to sleep after each VoteSetMaj23Message sent
maxConsensusMessageSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
)
//-----------------------------------------------------------------------------
type ConsensusReactor struct {
p2p.BaseReactor // QuitService + p2p.Switch
p2p.BaseReactor // BaseService + p2p.Switch
blockStore *bc.BlockStore
conS *ConsensusState
fastSync bool
evsw types.EventSwitch
conS *ConsensusState
fastSync bool
evsw types.EventSwitch
}
func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore, fastSync bool) *ConsensusReactor {
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
conR := &ConsensusReactor{
blockStore: blockStore,
conS: consensusState,
fastSync: fastSync,
conS: consensusState,
fastSync: fastSync,
}
conR.BaseReactor = *p2p.NewBaseReactor(log, "ConsensusReactor", conR)
return conR
@ -101,6 +100,12 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
},
&p2p.ChannelDescriptor{
ID: VoteSetBitsChannel,
Priority: 1,
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
},
}
}
@ -114,9 +119,10 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
// Begin gossip routines for this peer.
// Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState)
go conR.gossipVotesRoutine(peer, peerState)
go conR.queryMaj23Routine(peer, peerState)
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
@ -166,6 +172,36 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
ps.ApplyCommitStepMessage(msg)
case *HasVoteMessage:
ps.ApplyHasVoteMessage(msg)
case *VoteSetMaj23Message:
cs := conR.conS
cs.mtx.Lock()
height, votes := cs.Height, cs.Votes
cs.mtx.Unlock()
if height != msg.Height {
return
}
// Peer claims to have a maj23 for some BlockID at H,R,S.
votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key, msg.BlockID)
// Respond with a VoteSetBitsMessage showing which votes we have.
// (and consequently which we don't have)
var ourVotes *BitArray
switch msg.Type {
case types.VoteTypePrevote:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
return
}
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID,
Votes: ourVotes,
}})
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
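A toy model of the round-trip this handler implements: a peer announces a claimed +2/3 majority (VoteSetMaj23Message) and we answer with the votes we hold for that block (VoteSetBitsMessage). Plain structs and bool slices stand in for the wire types and go-common's BitArray; the field names are illustrative.

package main

import "fmt"

// Toy shapes for the claim/response pair handled above.
type voteSetMaj23 struct {
	Height, Round int
	BlockHash     string
}

type voteSetBits struct {
	Height, Round int
	BlockHash     string
	Votes         []bool
}

// respond answers a maj23 claim with the votes we hold for that block,
// mirroring the TrySend on VoteSetBitsChannel above.
func respond(req voteSetMaj23, ourVotesForBlock []bool) voteSetBits {
	return voteSetBits{req.Height, req.Round, req.BlockHash, ourVotesForBlock}
}

func main() {
	req := voteSetMaj23{Height: 10, Round: 0, BlockHash: "AB12"}
	resp := respond(req, []bool{true, false, true, true})
	fmt.Printf("%+v\n", resp) // the claimant learns which votes we already have
}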
@ -201,7 +237,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
cs.mtx.Unlock()
ps.EnsureVoteBitArrays(height, valSize)
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote, msg.ValidatorIndex)
ps.SetHasVote(msg.Vote)
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}
@ -209,6 +245,39 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteSetBitsChannel:
if conR.fastSync {
log.Warn("Ignoring message received during fastSync", "msg", msg)
return
}
switch msg := msg.(type) {
case *VoteSetBitsMessage:
cs := conR.conS
cs.mtx.Lock()
height, votes := cs.Height, cs.Votes
cs.mtx.Unlock()
if height == msg.Height {
var ourVotes *BitArray
switch msg.Type {
case types.VoteTypePrevote:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
return
}
ps.ApplyVoteSetBitsMessage(msg, ourVotes)
} else {
ps.ApplyVoteSetBitsMessage(msg, nil)
}
default:
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
default:
log.Warn(Fmt("Unknown chId %X", chID))
}
@ -218,11 +287,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
}
// Sets our private validator account for signing votes.
func (conR *ConsensusReactor) SetPrivValidator(priv *types.PrivValidator) {
conR.conS.SetPrivValidator(priv)
}
// implements events.Eventable
func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) {
conR.evsw = evsw
@ -242,7 +306,7 @@ func (conR *ConsensusReactor) registerEventCallbacks() {
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) {
edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote, edv.Index)
conR.broadcastHasVoteMessage(edv.Vote)
})
}
@ -258,12 +322,12 @@ func (conR *ConsensusReactor) broadcastNewRoundStep(rs *RoundState) {
}
// Broadcasts HasVoteMessage to peers that care.
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote, index int) {
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: index,
Index: vote.ValidatorIndex,
}
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
/*
@ -346,15 +410,19 @@ OUTER_LOOP:
//log.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeader is correct
blockMeta := conR.blockStore.LoadBlockMeta(prs.Height)
if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
log.Warn("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
} else if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
// Load the part
part := conR.blockStore.LoadBlockPart(prs.Height, index)
part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
if part == nil {
log.Warn("Could not load part", "index", index,
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
@ -391,13 +459,13 @@ OUTER_LOOP:
// Send Proposal && ProposalPOL BitArray?
if rs.Proposal != nil && !prs.Proposal {
// Proposal
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposal(rs.Proposal)
}
// ProposalPOL.
// ProposalPOL: lets peer know which POL votes we have so far.
// Peer must receive ProposalMessage first.
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
@ -492,8 +560,8 @@ OUTER_LOOP:
if prs.Height != 0 && rs.Height >= prs.Height+2 {
// Load the block commit for prs.Height,
// which contains precommit signatures for prs.Height.
commit := conR.blockStore.LoadBlockCommit(prs.Height)
log.Debug("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
commit := conR.conS.blockStore.LoadBlockCommit(prs.Height)
log.Info("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
if ps.PickSendVote(commit) {
log.Debug("Picked Catchup commit to send")
continue OUTER_LOOP
@ -503,7 +571,7 @@ OUTER_LOOP:
if sleeping == 0 {
// We sent nothing. Sleep...
sleeping = 1
log.Info("No votes to send, sleeping", "peer", peer,
log.Debug("No votes to send, sleeping", "peer", peer,
"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
} else if sleeping == 2 {
@ -516,6 +584,110 @@ OUTER_LOOP:
}
}
// NOTE: `queryMaj23Routine` has a simple, crude design, since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer *p2p.Peer, ps *PeerState) {
log := log.New("peer", peer)
OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
log.Notice(Fmt("Stopping queryMaj23Routine for %v.", peer))
return
}
// Maybe send Height/Round/Prevotes
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrevote,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Maybe send Height/Round/Precommits
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrecommit,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Maybe send Height/Round/ProposalPOL
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Little point sending LastCommitRound/LastCommit;
// these are fleeting and non-blocking.
// Maybe send Height/CatchupCommitRound/CatchupCommit.
{
prs := ps.GetRoundState()
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
commit := conR.conS.LoadCommit(prs.Height)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round(),
Type: types.VoteTypePrecommit,
BlockID: commit.BlockID,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
time.Sleep(peerQueryMaj23SleepDuration)
continue OUTER_LOOP
}
}
func (conR *ConsensusReactor) String() string {
// better not to access shared variables
return "ConsensusReactor" // conR.StringIndented("")
}
func (conR *ConsensusReactor) StringIndented(indent string) string {
s := "ConsensusReactor{\n"
s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Data.Get(types.PeerStateKey).(*PeerState)
s += indent + " " + ps.StringIndented(indent+" ") + "\n"
}
s += indent + "}"
return s
}
//-----------------------------------------------------------------------------
// Read only when returned by PeerState.GetRoundState().
@ -537,6 +709,30 @@ type PeerRoundState struct {
CatchupCommit *BitArray // All commit precommits peer has for this height & CatchupCommitRound
}
func (prs PeerRoundState) String() string {
return prs.StringIndented("")
}
func (prs PeerRoundState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerRoundState{
%s %v/%v/%v @%v
%s Proposal %v -> %v
%s POL %v (round %v)
%s Prevotes %v
%s Precommits %v
%s LastCommit %v (round %v)
%s Catchup %v (round %v)
%s}`,
indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
indent, prs.ProposalPOL, prs.ProposalPOLRound,
indent, prs.Prevotes,
indent, prs.Precommits,
indent, prs.LastCommit, prs.LastCommitRound,
indent, prs.CatchupCommit, prs.CatchupCommitRound,
indent)
}
//-----------------------------------------------------------------------------
var (
@ -613,8 +809,8 @@ func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
// Convenience function to send vote to peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
if index, vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{index, vote}
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
return true
}
@ -622,12 +818,12 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
}
// votes: Must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote *types.Vote, ok bool) {
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if votes.Size() == 0 {
return 0, nil, false
return nil, false
}
height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()
@ -640,16 +836,20 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote
psVotes := ps.getVoteBitArray(height, round, type_)
if psVotes == nil {
return 0, nil, false // Not something worth sending
return nil, false // Not something worth sending
}
if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
ps.setHasVote(height, round, type_, index)
return index, votes.GetByIndex(index), true
return votes.GetByIndex(index), true
}
return 0, nil, false
return nil, false
}
func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
if !types.IsVoteTypeValid(type_) {
PanicSanity("Invalid vote type")
}
if ps.Height == height {
if ps.Round == round {
switch type_ {
@ -657,8 +857,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return ps.Prevotes
case types.VoteTypePrecommit:
return ps.Precommits
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.CatchupCommitRound == round {
@ -667,8 +865,14 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.CatchupCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
return ps.ProposalPOL
case types.VoteTypePrecommit:
return nil
}
}
return nil
@ -680,8 +884,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.LastCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
return nil
@ -741,56 +943,19 @@ func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) {
}
}
func (ps *PeerState) SetHasVote(vote *types.Vote, index int) {
func (ps *PeerState) SetHasVote(vote *types.Vote) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
ps.setHasVote(vote.Height, vote.Round, vote.Type, index)
ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}
func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
log := log.New("peer", ps.Peer, "peerRound", ps.Round, "height", height, "round", round)
if type_ != types.VoteTypePrevote && type_ != types.VoteTypePrecommit {
PanicSanity("Invalid vote type")
}
log.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
if ps.Height == height {
if ps.Round == round {
switch type_ {
case types.VoteTypePrevote:
ps.Prevotes.SetIndex(index, true)
log.Debug("SetHasVote(round-match)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
ps.Precommits.SetIndex(index, true)
log.Debug("SetHasVote(round-match)", "precommits", ps.Precommits, "index", index)
}
} else if ps.CatchupCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.CatchupCommit.SetIndex(index, true)
log.Debug("SetHasVote(CatchupCommit)", "precommits", ps.Precommits, "index", index)
}
} else if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
ps.ProposalPOL.SetIndex(index, true)
log.Debug("SetHasVote(ProposalPOL)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
}
}
} else if ps.Height == height+1 {
if ps.LastCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.LastCommit.SetIndex(index, true)
log.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
}
}
} else {
// Does not apply.
}
// NOTE: some may be nil BitArrays -> no side effects.
ps.getVoteBitArray(height, round, type_).SetIndex(index, true)
}
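The rewritten setHasVote above leans on a Go idiom flagged by its NOTE: getVoteBitArray may return nil, and calling SetIndex on a nil receiver is a harmless no-op. A minimal standalone illustration with a toy type (not go-common's actual BitArray):

package main

import "fmt"

// Toy bit array: methods with pointer receivers may be called on a nil
// pointer, so callers like setHasVote need no nil check.
type bits struct{ elems []bool }

func (b *bits) SetIndex(i int, v bool) bool {
	if b == nil || i >= len(b.elems) {
		return false // nil receiver or out of range: no side effects
	}
	b.elems[i] = v
	return true
}

func main() {
	var b *bits                      // nil
	fmt.Println(b.SetIndex(3, true)) // false, and no panic
	b = &bits{elems: make([]bool, 8)}
	fmt.Println(b.SetIndex(3, true)) // true
}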
func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
@ -858,31 +1023,66 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
ps.ProposalBlockParts = msg.BlockParts
}
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Height != msg.Height {
return
}
if ps.ProposalPOLRound != msg.ProposalPOLRound {
return
}
ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
// TODO: Merge onto existing ps.ProposalPOL?
// We might have sent some prevotes in the meantime.
ps.ProposalPOL = msg.ProposalPOL
}
func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Height != msg.Height {
return
}
if ps.ProposalPOLRound != msg.ProposalPOLRound {
return
ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}
// The peer has responded with a bitarray of the votes it has
// for the corresponding BlockID.
// ourVotes: BitArray of votes we have for msg.BlockID
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *BitArray) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
if votes != nil {
if ourVotes == nil {
votes.Update(msg.Votes)
} else {
otherVotes := votes.Sub(ourVotes)
hasVotes := otherVotes.Or(msg.Votes)
votes.Update(hasVotes)
}
}
}
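A worked example of the Sub/Or arithmetic above, with bool slices standing in for BitArray (assumed per-index boolean semantics, with Update overwriting the stored bits): keep what we believed about the peer beyond our own votes for the block, then overlay what the peer just reported.

package main

import "fmt"

// sub is a AND NOT b, per index.
func sub(a, b []bool) []bool {
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] && !b[i]
	}
	return out
}

// or is a OR b, per index.
func or(a, b []bool) []bool {
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] || b[i]
	}
	return out
}

func main() {
	peerVotes := []bool{true, true, false, false} // what we think the peer has
	ourVotes := []bool{true, false, false, false} // our votes for msg.BlockID
	msgVotes := []bool{false, true, true, false}  // bits the peer reported

	// votes.Sub(ourVotes).Or(msg.Votes): keep peer bits we did not learn
	// from our own view, then overlay the freshly reported bits.
	merged := or(sub(peerVotes, ourVotes), msgVotes)
	fmt.Println(merged) // [false true true false]
}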
// TODO: Merge onto existing ps.ProposalPOL?
// We might have sent some prevotes in the meantime.
ps.ProposalPOL = msg.ProposalPOL
func (ps *PeerState) String() string {
return ps.StringIndented("")
}
func (ps *PeerState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerState{
%s Key %v
%s PRS %v
%s}`,
indent, ps.Peer.Key,
indent, ps.PeerRoundState.StringIndented(indent+" "),
indent)
}
//-----------------------------------------------------------------------------
@ -896,6 +1096,8 @@ const (
msgTypeBlockPart = byte(0x13) // both block & POL
msgTypeVote = byte(0x14)
msgTypeHasVote = byte(0x15)
msgTypeVoteSetMaj23 = byte(0x16)
msgTypeVoteSetBits = byte(0x17)
)
type ConsensusMessage interface{}
@ -909,6 +1111,8 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
wire.ConcreteType{&VoteMessage{}, msgTypeVote},
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23},
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits},
)
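The registry above is how go-wire picks a concrete type from the leading byte of an encoded message. A toy model of that dispatch (a reflect-based map, not go-wire's real internals; message shapes abbreviated):

package main

import (
	"fmt"
	"reflect"
)

// Abbreviated message shapes, just for the dispatch demo.
type voteSetMaj23Message struct{ Height, Round int }
type voteSetBitsMessage struct{ Height, Round int }

// Toy model of the wire.RegisterInterface table: the leading byte of an
// encoded message selects the concrete Go type to decode into.
var msgTypes = map[byte]reflect.Type{
	0x16: reflect.TypeOf(voteSetMaj23Message{}),
	0x17: reflect.TypeOf(voteSetBitsMessage{}),
}

func main() {
	msg := reflect.New(msgTypes[0x16]).Interface()
	fmt.Printf("%T\n", msg) // *main.voteSetMaj23Message
}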
// TODO: check for unnecessary extra bytes at the end.
@ -985,12 +1189,11 @@ func (m *BlockPartMessage) String() string {
//-------------------------------------
type VoteMessage struct {
ValidatorIndex int
Vote *types.Vote
Vote *types.Vote
}
func (m *VoteMessage) String() string {
return fmt.Sprintf("[Vote VI:%v V:%v VI:%v]", m.ValidatorIndex, m.Vote, m.ValidatorIndex)
return fmt.Sprintf("[Vote %v]", m.Vote)
}
//-------------------------------------
@ -1005,3 +1208,30 @@ type HasVoteMessage struct {
func (m *HasVoteMessage) String() string {
return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v} VI:%v]", m.Index, m.Height, m.Round, m.Type, m.Index)
}
//-------------------------------------
type VoteSetMaj23Message struct {
Height int
Round int
Type byte
BlockID types.BlockID
}
func (m *VoteSetMaj23Message) String() string {
return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
}
//-------------------------------------
type VoteSetBitsMessage struct {
Height int
Round int
Type byte
BlockID types.BlockID
Votes *BitArray
}
func (m *VoteSetBitsMessage) String() string {
return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
}

+309 -0 consensus/reactor_test.go

@ -0,0 +1,309 @@
package consensus
import (
"fmt"
"sync"
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
)
func init() {
config = tendermint_test.ResetConfig("consensus_reactor_test")
}
//----------------------------------------------
// in-process testnets
func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
reactors[i].SetEventSwitch(eventSwitch)
if subscribeEventRespond {
eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock())
} else {
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
}
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("CONSENSUS", reactors[i])
return s
}, p2p.Connect2Switches)
// now that everyone is connected, start the state machines
// If we started the state machines before everyone was connected,
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
for i := 0; i < N; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s)
}
return reactors, eventChans
}
func stopConsensusNet(reactors []*ConsensusReactor) {
for _, r := range reactors {
r.Switch.Stop()
}
}
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans := startConsensusNet(t, css, N, false)
defer stopConsensusNet(reactors)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}
//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set
func TestVotingPowerChange(t *testing.T) {
nVals := 4
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nVals, true)
defer stopConsensusNet(reactors)
// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator a few times")
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 100)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
}
func TestValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nPeers, true)
defer stopConsensusNet(reactors)
// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding one validator")
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
// wait till everyone makes block 2
// ensure the commit includes all validators
// send newValTx to change vals in block 3
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1)
// wait till everyone makes block 3.
// it includes the commit for block 2, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
// wait till everyone makes block 4.
// it includes the commit for block 3, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
// the commits for block 4 should be with the updated validator set
activeVals[string(newValidatorPubKey1.Address())] = struct{}{}
// wait till everyone makes block 5
// it includes the commit for block 4, which should have the updated validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator")
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
}
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding two validators at once")
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing removing two validators at once")
removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
delete(activeVals, string(newValidatorPubKey2.Address()))
delete(activeVals, string(newValidatorPubKey3.Address()))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
}
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].timeoutParams.SkipTimeoutCommit = false
}
reactors, eventChans := startConsensusNet(t, css, N-1, false)
defer stopConsensusNet(reactors)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlockI := <-eventChans[j]
newBlock := newBlockI.(types.EventDataNewBlock).Block
log.Warn("Got block", "height", newBlock.Height, "validator", j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
}
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
}
eventChans[j] <- struct{}{}
wg.Done()
log.Warn("Done wait group", "height", newBlock.Height, "validator", j)
}, css)
}
// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
if block.LastCommit.Size() != len(activeVals) {
return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals))
}
for _, vote := range block.LastCommit.Precommits {
if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok {
return fmt.Errorf("Found vote for unactive validator %X", vote.ValidatorAddress)
}
}
return nil
}
func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*ConsensusState) {
wg := new(sync.WaitGroup)
wg.Add(n)
for i := 0; i < n; i++ {
go f(wg, i)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(time.Second * 10):
for i, cs := range css {
fmt.Println("#################")
fmt.Println("Validator", i)
fmt.Println(cs.GetRoundState())
fmt.Println("")
}
panic("Timed out waiting for all validators to commit a block")
}
}
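timeoutWaitGroup wraps the standard wait-with-deadline idiom, since sync.WaitGroup.Wait has no timeout of its own. The same shape in isolation, minus the test plumbing:

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout reports whether the group finished before the deadline,
// the same select-against-a-timer pattern as timeoutWaitGroup above.
func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return true
	case <-time.After(d):
		return false
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { time.Sleep(10 * time.Millisecond); wg.Done() }()
	fmt.Println(waitTimeout(&wg, time.Second)) // true
}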

+73 -74 consensus/replay.go

@ -11,6 +11,7 @@ import (
"strings"
"time"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
@ -18,12 +19,17 @@ import (
"github.com/tendermint/tendermint/types"
)
// unmarshal and apply a single message to the consensus state
// Unmarshal and apply a single message to the consensus state
// as if it were received in receiveRoutine
// Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running
func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan interface{}) error {
// Skip over empty and meta lines
if len(msgBytes) == 0 || msgBytes[0] == '#' {
return nil
}
var err error
var msg ConsensusLogMessage
var msg TimedWALMessage
wire.ReadJSON(&msg, msgBytes, &err)
if err != nil {
fmt.Println("MsgBytes:", msgBytes, string(msgBytes))
@ -62,7 +68,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
case *VoteMessage:
v := msg.Vote
log.Notice("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
"hash", v.BlockHash, "header", v.BlockPartsHeader, "peer", peerKey)
"blockID", v.BlockID, "peer", peerKey)
}
cs.handleMsg(m, cs.RoundState)
@ -70,7 +76,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
log.Notice("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.handleTimeout(m, cs.RoundState)
default:
return fmt.Errorf("Replay: Unknown ConsensusLogMessage type: %v", reflect.TypeOf(msg.Msg))
return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
}
return nil
}
@ -78,83 +84,48 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
// replay only those messages since the last block.
// timeoutRoutine should run concurrently to read off tickChan
func (cs *ConsensusState) catchupReplay(csHeight int) error {
if !cs.wal.Exists() {
return nil
}
// set replayMode
cs.replayMode = true
defer func() { cs.replayMode = false }()
// starting from end of file,
// read messages until a new height is found
var walHeight int
nLines, err := cs.wal.SeekFromEnd(func(lineBytes []byte) bool {
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, lineBytes, &err)
if err != nil {
panic(Fmt("Failed to read cs_msg_log json: %v", err))
}
m, ok := msg.Msg.(types.EventDataRoundState)
walHeight = m.Height
if ok && m.Step == RoundStepNewHeight.String() {
return true
}
return false
})
if err != nil {
return err
// Ensure that height+1 doesn't exist
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight+1))
if found {
return errors.New(Fmt("WAL should not contain height %d.", csHeight+1))
}
if gr != nil {
gr.Close()
}
// ensure the height matches
if walHeight != csHeight {
var err error
if walHeight > csHeight {
err = errors.New(Fmt("WAL height (%d) exceeds cs height (%d). Is your cs.state corrupted?", walHeight, csHeight))
} else {
log.Notice("Replay: nothing to do", "cs.height", csHeight, "wal.height", walHeight)
}
// Search for height marker
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "height", csHeight)
return nil
} else if err != nil {
return err
}
var beginning bool // if we had to go back to the beginning
if c, _ := cs.wal.fp.Seek(0, 1); c == 0 {
beginning = true
if !found {
return errors.New(Fmt("WAL does not contain height %d.", csHeight))
}
defer gr.Close()
log.Notice("Catchup by replaying consensus messages", "n", nLines, "height", walHeight)
// now we can replay the latest nLines on consensus state
// note we can't use scan because we've already been reading from the file
// XXX: if a msg is too big we need to find out why or increase this for that case ...
maxMsgSize := 1000000
reader := bufio.NewReaderSize(cs.wal.fp, maxMsgSize)
for i := 0; i < nLines; i++ {
msgBytes, err := reader.ReadBytes('\n')
if err == io.EOF {
log.Warn("Replay: EOF", "bytes", string(msgBytes))
break
} else if err != nil {
return err
} else if len(msgBytes) == 0 {
log.Warn("Replay: msg bytes is 0")
continue
} else if len(msgBytes) == 1 && msgBytes[0] == '\n' {
log.Warn("Replay: new line")
continue
}
// the first msg is the NewHeight event (if we're not at the beginning), so we can ignore it
if !beginning && i == 1 {
log.Warn("Replay: not beginning and 1")
continue
}
log.Notice("Catchup by replaying consensus messages", "height", csHeight)
for {
line, err := gr.ReadLine()
if err != nil {
if err == io.EOF {
break
} else {
return err
}
}
// NOTE: since the priv key is set when the msgs are received
// it will attempt to, e.g., double sign, but we can just ignore it
// since the votes will be replayed and we'll get to the next step
if err := cs.readReplayMessage(msgBytes, nil); err != nil {
if err := cs.readReplayMessage([]byte(line), nil); err != nil {
return err
}
}
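For a picture of what replay consumes: the WAL is newline-delimited, with "#"-prefixed meta lines (notably the "#HEIGHT: N" markers searched above) interleaved with JSON-encoded TimedWALMessages. The excerpt below is hypothetical with its payloads elided; the scan mirrors readReplayMessage's skip rule.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// Hypothetical WAL excerpt; the JSON bodies are placeholders, not real
// encodings of TimedWALMessage.
const wal = `#HEIGHT: 1
{"time":"...","msg":{"...":"..."}}
{"time":"...","msg":{"...":"..."}}
#HEIGHT: 2
`

func main() {
	sc := bufio.NewScanner(strings.NewReader(wal))
	for sc.Scan() {
		line := sc.Text()
		if len(line) == 0 || line[0] == '#' {
			continue // skipped, exactly as readReplayMessage does
		}
		fmt.Println("replay:", line)
	}
}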
@ -245,6 +216,7 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
pb.cs.Wait()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
@ -274,16 +246,18 @@ func (cs *ConsensusState) startForReplay() {
// don't want to start full cs
cs.BaseService.OnStart()
log.Warn("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
}
}
}
}()
}()*/
}
// console function for parsing input and running commands
@ -376,3 +350,28 @@ func (pb *playback) replayConsoleLoop() int {
}
return 0
}
//--------------------------------------------------------------------------------
// Parses marker lines of the form:
// #HEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
if len(parts) != 2 {
return -1, errors.New("Line did not have 2 parts")
}
i, err := strconv.Atoi(parts[1])
if err != nil {
return -1, errors.New("Failed to parse INFO: " + err.Error())
}
if height < i {
return 1, nil
} else if height == i {
return 0, nil
} else {
return -1, nil
}
}
}
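The SearchFunc returned above is a three-way comparator driving the group's search: 1 means the marker is past the target height, 0 is a hit, and -1 means keep searching forward. A standalone copy with a quick check of those returns:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

type searchFunc func(line string) (int, error)

// Same logic as makeHeightSearchFunc above, reproduced for a quick check.
func makeHeightSearchFunc(height int) searchFunc {
	return func(line string) (int, error) {
		parts := strings.Split(strings.TrimRight(line, "\n"), " ")
		if len(parts) != 2 {
			return -1, errors.New("Line did not have 2 parts")
		}
		i, err := strconv.Atoi(parts[1])
		if err != nil {
			return -1, err
		}
		switch {
		case height < i:
			return 1, nil // marker is past the target: look earlier
		case height == i:
			return 0, nil // found
		default:
			return -1, nil // marker is before the target: look later
		}
	}
}

func main() {
	f := makeHeightSearchFunc(12345)
	for _, l := range []string{"#HEIGHT: 12344", "#HEIGHT: 12345", "#HEIGHT: 12346"} {
		r, _ := f(l)
		fmt.Println(l, "->", r) // -1, 0, 1
	}
}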

+81 -65 consensus/replay_test.go

@ -9,26 +9,38 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
func init() {
config = tendermint_test.ResetConfig("consensus_replay_test")
}
// TODO: these tests ensure we can always recover from any state of the wal,
// assuming it comes with a correct related state for the priv_validator.json.
// It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
var data_dir = path.Join(GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")
// the priv validator changes step at these lines for a block with 1 val and 1 part
var baseStepChanges = []int{2, 5, 7}
var baseStepChanges = []int{3, 6, 8}
// test recovery from each line in each testCase
var testCases = []*testCase{
newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part)
newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part
newTestCase("small_block2", []int{2, 7, 9}), // small block with txs across 3 smaller block parts
newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part)
newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part
newTestCase("small_block2", []int{3, 10, 12}), // small block with txs across 5 smaller block parts
}
type testCase struct {
name string
log string //full cswal
log string // full cs wal
stepMap map[int]int8 // map lines of log to privval step
proposeLine int
@ -71,21 +83,20 @@ func readWAL(p string) string {
return string(b)
}
func writeWAL(log string) string {
fmt.Println("writing", log)
// write the needed wal to file
f, err := ioutil.TempFile(os.TempDir(), "replay_test_")
func writeWAL(walMsgs string) string {
tempDir := os.TempDir()
walDir := tempDir + "/wal" + RandStr(12)
// Create WAL directory
err := EnsureDir(walDir, 0700)
if err != nil {
panic(err)
}
_, err = f.WriteString(log)
// Write the needed WAL to file
err = WriteFile(walDir+"/wal", []byte(walMsgs), 0600)
if err != nil {
panic(err)
}
name := f.Name()
f.Close()
return name
return walDir
}
func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
@ -97,22 +108,36 @@ func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
}
}
func runReplayTest(t *testing.T, cs *ConsensusState, fileName string, newBlockCh chan interface{},
func runReplayTest(t *testing.T, cs *ConsensusState, walDir string, newBlockCh chan interface{},
thisCase *testCase, i int) {
cs.config.Set("cswal", fileName)
cs.config.Set("cs_wal_dir", walDir)
cs.Start()
// Wait to make a new block.
// This is just a signal that we haven't halted; its not something contained in the WAL itself.
// Assuming the consensus state is running, replay of any WAL, including the empty one,
// should eventually be followed by a new block, or else something is wrong
waitForBlock(newBlockCh, thisCase, i)
cs.evsw.Stop()
cs.Stop()
LOOP:
for {
select {
case <-newBlockCh:
default:
break LOOP
}
}
cs.Wait()
}
func toPV(pv PrivValidator) *types.PrivValidator {
return pv.(*types.PrivValidator)
}
func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
fmt.Println("-------------------------------------")
log.Notice(Fmt("Starting replay test of %d lines of WAL (crash before write)", nLines))
log.Notice(Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))
lineStep := nLines
if crashAfter {
@ -123,19 +148,29 @@ func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*Consensu
lastMsg := split[nLines]
// we write those lines up to (not including) one with the signature
fileName := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
walDir := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
cs := fixedConsensusStateDummy()
// set the last step according to when we crashed vs the wal
cs.privValidator.LastHeight = 1 // first block
cs.privValidator.LastStep = thisCase.stepMap[lineStep]
toPV(cs.privValidator).LastHeight = 1 // first block
toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep]
fmt.Println("LAST STEP", cs.privValidator.LastStep)
log.Warn("setupReplayTest", "LastStep", toPV(cs.privValidator).LastStep)
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
return cs, newBlockCh, lastMsg, fileName
return cs, newBlockCh, lastMsg, walDir
}
func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage {
var err error
var msg TimedWALMessage
wire.ReadJSON(&msg, []byte(walMsg), &err)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
return msg
}
//-----------------------------------------------
@ -146,8 +181,8 @@ func TestReplayCrashAfterWrite(t *testing.T) {
for _, thisCase := range testCases {
split := strings.Split(thisCase.log, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, f := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, f, newBlockCh, thisCase, i+1)
cs, newBlockCh, _, walDir := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, walDir, newBlockCh, thisCase, i+1)
}
}
}
@ -159,57 +194,38 @@ func TestReplayCrashAfterWrite(t *testing.T) {
func TestReplayCrashBeforeWritePropose(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
cs, newBlockCh, proposalMsg, f := setupReplayTest(thisCase, lineNum, false) // propose
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(proposalMsg), &err)
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walDir := setupReplayTest(thisCase, lineNum, false)
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
cs.privValidator.LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, f, newBlockCh, thisCase, lineNum)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
}
}
func TestReplayCrashBeforeWritePrevote(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.prevoteLine
cs, newBlockCh, voteMsg, f := setupReplayTest(thisCase, lineNum, false) // prevote
types.AddListenerForEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), func(data types.TMEventData) {
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(voteMsg), &err)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
cs.privValidator.LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, f, newBlockCh, thisCase, lineNum)
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal())
}
}
func TestReplayCrashBeforeWritePrecommit(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.precommitLine
cs, newBlockCh, voteMsg, f := setupReplayTest(thisCase, lineNum, false) // precommit
types.AddListenerForEvent(cs.evsw, "tester", types.EventStringPolka(), func(data types.TMEventData) {
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(voteMsg), &err)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
cs.privValidator.LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, f, newBlockCh, thisCase, lineNum)
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka())
}
}
func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walDir := setupReplayTest(thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
toPV(cs.privValidator).LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
}

+236 -250 consensus/state.go

@ -4,10 +4,13 @@ import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"sync"
"time"
"github.com/ebuchman/fail-test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
@ -21,15 +24,17 @@ import (
//-----------------------------------------------------------------------------
// Timeout Parameters
// All in milliseconds
// TimeoutParams holds timeouts and deltas for each round step.
// All timeouts and deltas in milliseconds.
type TimeoutParams struct {
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
SkipTimeoutCommit bool
}
// Wait this long for a proposal
@ -52,16 +57,17 @@ func (tp *TimeoutParams) Commit(t time.Time) time.Time {
return t.Add(time.Duration(tp.Commit0) * time.Millisecond)
}
// Initialize parameters from config
// InitTimeoutParamsFromConfig initializes parameters from config
func InitTimeoutParamsFromConfig(config cfg.Config) *TimeoutParams {
return &TimeoutParams{
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
SkipTimeoutCommit: config.GetBool("skip_timeout_commit"),
}
}
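The base/delta pairs encode per-round escalation. A sketch assuming the shape used by this package's timeout helpers, timeout(round) = base + delta*round milliseconds; the sample values below are illustrative, not defaults.

package main

import (
	"fmt"
	"time"
)

// timeoutFor models the base+delta*round escalation the fields above
// encode, matching the shape of helpers like TimeoutParams.Propose.
func timeoutFor(base, delta, round int) time.Duration {
	return time.Duration(base+delta*round) * time.Millisecond
}

func main() {
	// Illustrative values for timeout_propose / timeout_propose_delta.
	for round := 0; round < 3; round++ {
		fmt.Println(round, timeoutFor(3000, 500, round)) // 3s, 3.5s, 4s
	}
}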
@ -187,8 +193,7 @@ func (rs *RoundState) StringShort() string {
//-----------------------------------------------------------------------------
var (
msgQueueSize = 1000
tickTockBufferSize = 10
msgQueueSize = 1000
)
// msgs from the reactor which may update the state
@ -209,26 +214,31 @@ func (ti *timeoutInfo) String() string {
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step)
}
type PrivValidator interface {
GetAddress() []byte
SignVote(chainID string, vote *types.Vote) error
SignProposal(chainID string, proposal *types.Proposal) error
}
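With PrivValidator now an interface, the signer is pluggable, which is what lets tests substitute doubles. A minimal sketch of such a double, with toy stand-ins for the types package; a real implementation would also track last height/round/step to refuse double signing.

package main

import "fmt"

// Toy stand-ins for types.Vote and types.Proposal, just to show the
// shape of a PrivValidator test double.
type vote struct {
	Height, Round int
	Signature     []byte
}

type proposal struct {
	Height, Round int
	Signature     []byte
}

type privValidator interface {
	GetAddress() []byte
	SignVote(chainID string, vote *vote) error
	SignProposal(chainID string, proposal *proposal) error
}

// mockPV "signs" by stamping a fixed byte string.
type mockPV struct{ addr []byte }

func (m *mockPV) GetAddress() []byte { return m.addr }
func (m *mockPV) SignVote(chainID string, v *vote) error {
	v.Signature = []byte("mock-sig")
	return nil
}
func (m *mockPV) SignProposal(chainID string, p *proposal) error {
	p.Signature = []byte("mock-sig")
	return nil
}

func main() {
	var pv privValidator = &mockPV{addr: []byte{0x01}}
	v := &vote{Height: 1, Round: 0}
	_ = pv.SignVote("test-chain", v)
	fmt.Printf("%x %s\n", pv.GetAddress(), v.Signature)
}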
// Tracks consensus state across block heights and rounds.
type ConsensusState struct {
QuitService
BaseService
config cfg.Config
proxyAppConn proxy.AppConnConsensus
blockStore *bc.BlockStore
mempool *mempl.Mempool
config cfg.Config
proxyAppConn proxy.AppConnConsensus
blockStore *bc.BlockStore
mempool *mempl.Mempool
privValidator *types.PrivValidator
privValidator PrivValidator // for signing votes
mtx sync.Mutex
RoundState
state *sm.State // State until height-1.
peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker *time.Ticker // ticker for timeouts
tickChan chan timeoutInfo // start the timeoutTicker in the timeoutRoutine
tockChan chan timeoutInfo // timeouts are relayed on tockChan to the receiveRoutine
timeoutParams *TimeoutParams // parameters and functions for timeout intervals
peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker TimeoutTicker // ticker for timeouts
timeoutParams *TimeoutParams // parameters and functions for timeout intervals
evsw types.EventSwitch
@ -236,6 +246,13 @@ type ConsensusState struct {
replayMode bool // so we don't log signing errors during replay
nSteps int // used for testing to limit the number of transitions the state makes
// allow certain functions to be overwritten for testing
decideProposal func(height, round int)
doPrevote func(height, round int)
setProposal func(proposal *types.Proposal) error
done chan struct{}
}
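// Hypothetical test hook, using the defaults set in NewConsensusState below
// (this is how consensus/byzantine_test.go drives misbehavior): swap in custom
// behavior after construction and before Start().
//
//	cs := NewConsensusState(config, state, proxyAppConn, blockStore, mempool)
//	cs.decideProposal = func(height, round int) {
//		// e.g. propose nothing, to exercise timeoutPropose handling
//	}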
func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore *bc.BlockStore, mempool *mempl.Mempool) *ConsensusState {
@ -246,16 +263,20 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
mempool: mempool,
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
timeoutTicker: new(time.Ticker),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
timeoutTicker: NewTimeoutTicker(),
timeoutParams: InitTimeoutParamsFromConfig(config),
done: make(chan struct{}),
}
// set function defaults (may be overwritten before calling Start)
cs.decideProposal = cs.defaultDecideProposal
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal
cs.updateToState(state)
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
cs.QuitService = *NewQuitService(log, "ConsensusState", cs)
cs.BaseService = *NewBaseService(log, "ConsensusState", cs)
return cs
}
@ -295,26 +316,67 @@ func (cs *ConsensusState) GetValidators() (int, []*types.Validator) {
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
}
func (cs *ConsensusState) SetPrivValidator(priv *types.PrivValidator) {
// Sets our private validator account for signing votes.
func (cs *ConsensusState) SetPrivValidator(priv PrivValidator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.privValidator = priv
}
// Set the local timer
func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.timeoutTicker = timeoutTicker
}
func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
cs.mtx.Lock()
defer cs.mtx.Unlock()
if height == cs.blockStore.Height() {
return cs.blockStore.LoadSeenCommit(height)
}
return cs.blockStore.LoadBlockCommit(height)
}
func (cs *ConsensusState) OnStart() error {
cs.QuitService.OnStart()
cs.BaseService.OnStart()
err := cs.OpenWAL(cs.config.GetString("cswal"))
walDir := cs.config.GetString("cs_wal_dir")
err := EnsureDir(walDir, 0700)
if err != nil {
log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
return err
}
err = cs.OpenWAL(walDir)
if err != nil {
log.Error("Error loading ConsensusState wal", "error", err.Error())
return err
}
// If the latest block was applied in the abci handshake,
// we may not have written the current height to the wal,
// so check here and write it if not found.
// TODO: remove this and run the handshake/replay
// through the consensus state with a mock app
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(cs.Height))
if (err == io.EOF || !found) && cs.Step == RoundStepNewHeight {
log.Warn("Height not found in wal. Writing new height", "height", cs.Height)
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
} else if err != nil {
return err
}
if gr != nil {
gr.Close()
}
// we need the timeoutRoutine for replay so
// we don't block on the tick chan.
// NOTE: we will get a buildup of garbage goroutines
// firing on the tockChan until the receiveRoutine is started
// to deal with them (by that point, at most one will be valid)
go cs.timeoutRoutine()
cs.timeoutTicker.Start()
// we may have lost some votes if the process crashed
// reload from consensus log to catchup
@ -336,23 +398,32 @@ func (cs *ConsensusState) OnStart() error {
// timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
func (cs *ConsensusState) startRoutines(maxSteps int) {
go cs.timeoutRoutine()
cs.timeoutTicker.Start()
go cs.receiveRoutine(maxSteps)
}
func (cs *ConsensusState) OnStop() {
cs.QuitService.OnStop()
cs.BaseService.OnStop()
cs.timeoutTicker.Stop()
// Make BaseService.Wait() wait until cs.wal.Wait()
if cs.wal != nil && cs.IsRunning() {
cs.wal.Wait()
}
}
// NOTE: be sure to Stop() the event switch and drain
// any event channels or this may deadlock
func (cs *ConsensusState) Wait() {
<-cs.done
}
// Open the WAL (in walDir) to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(file string) (err error) {
func (cs *ConsensusState) OpenWAL(walDir string) (err error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
wal, err := NewWAL(file, cs.config.GetBool("cswal_light"))
wal, err := NewWAL(walDir, cs.config.GetBool("cs_wal_light"))
if err != nil {
return err
}
@ -366,15 +437,15 @@ func (cs *ConsensusState) OpenWAL(file string) (err error) {
// TODO: should these return anything or let callers just use events?
// May block on send if queue is full.
func (cs *ConsensusState) AddVote(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
if peerKey == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{valIndex, vote}, ""}
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
cs.peerMsgQueue <- msgInfo{&VoteMessage{valIndex, vote}, peerKey}
cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerKey}
}
// TODO: wait for event?!
return false, nil, nil
return false, nil
}
// May block on send if queue is full.
@ -429,17 +500,12 @@ func (cs *ConsensusState) updateRoundStep(round int, step RoundStepType) {
func (cs *ConsensusState) scheduleRound0(rs *RoundState) {
//log.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now())
if sleepDuration < time.Duration(0) {
sleepDuration = time.Duration(0)
}
cs.scheduleTimeout(sleepDuration, rs.Height, 0, RoundStepNewHeight)
}
// Attempt to schedule a timeout by sending timeoutInfo on the tickChan.
// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height, round int, step RoundStepType) {
cs.tickChan <- timeoutInfo{duration, height, round, step}
cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}
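// A toy version of the tick/tock pattern this relies on (illustration only;
// the real TimeoutTicker lives in consensus/ticker.go): ScheduleTimeout
// replaces any pending timer, and fired timeouts are relayed on Chan() so the
// receiveRoutine can consume them deterministically.
//
//	type toyTicker struct {
//		tickCh chan timeoutInfo // incoming schedule requests
//		tockCh chan timeoutInfo // fired timeouts
//	}
//
//	func newToyTicker() *toyTicker {
//		t := &toyTicker{
//			tickCh: make(chan timeoutInfo, 10),
//			tockCh: make(chan timeoutInfo, 10),
//		}
//		go t.run()
//		return t
//	}
//
//	func (t *toyTicker) ScheduleTimeout(ti timeoutInfo) { t.tickCh <- ti }
//	func (t *toyTicker) Chan() <-chan timeoutInfo       { return t.tockCh }
//
//	func (t *toyTicker) run() {
//		var timer *time.Timer
//		var timerCh <-chan time.Time // nil until something is scheduled
//		var pending timeoutInfo
//		for {
//			select {
//			case ti := <-t.tickCh: // a newer request replaces the pending one
//				if timer != nil {
//					timer.Stop()
//				}
//				pending = ti
//				timer = time.NewTimer(ti.Duration)
//				timerCh = timer.C
//			case <-timerCh: // receive from a nil channel blocks forever
//				t.tockCh <- pending
//			}
//		}
//	}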
// send a msg into the receiveRoutine regarding our own proposal, block part, or vote
@ -464,11 +530,11 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) {
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(cs.config.GetString("chain_id"), state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
for idx, precommit := range seenCommit.Precommits {
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
}
added, _, err := lastPrecommits.AddByIndex(idx, precommit)
added, err := lastPrecommits.AddVote(precommit)
if !added || err != nil {
PanicCrisis(Fmt("Failed to reconstruct LastCommit: %v", err))
}
@ -525,7 +591,6 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
} else {
cs.StartTime = cs.timeoutParams.Commit(cs.CommitTime)
}
cs.CommitTime = time.Time{}
cs.Validators = validators
cs.Proposal = nil
cs.ProposalBlock = nil
@ -557,55 +622,6 @@ func (cs *ConsensusState) newStep() {
//-----------------------------------------
// the main goroutines
// the state machine sends on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (cs *ConsensusState) timeoutRoutine() {
log.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-cs.tickChan:
log.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// ignore tickers for old height/round/step
if newti.Height < ti.Height {
continue
} else if newti.Height == ti.Height {
if newti.Round < ti.Round {
continue
} else if newti.Round == ti.Round {
if ti.Step > 0 && newti.Step <= ti.Step {
continue
}
}
}
ti = newti
// if the newti has duration == 0, we relay to the tockChan immediately (no timeout)
if ti.Duration == time.Duration(0) {
go func(t timeoutInfo) { cs.tockChan <- t }(ti)
continue
}
log.Debug("Scheduling timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
cs.timeoutTicker.Stop()
cs.timeoutTicker = time.NewTicker(ti.Duration)
case <-cs.timeoutTicker.C:
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
cs.timeoutTicker.Stop()
// goroutine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(t timeoutInfo) { cs.tockChan <- t }(ti)
case <-cs.Quit:
return
}
}
}
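// The stale-tick filtering above, factored into a predicate for clarity
// (illustrative only; with this change the shipped logic moves to
// consensus/ticker.go): a tick is ignored unless it is for a later height,
// round, or step.
//
//	func staleTick(prev, next timeoutInfo) bool {
//		if next.Height != prev.Height {
//			return next.Height < prev.Height
//		}
//		if next.Round != prev.Round {
//			return next.Round < prev.Round
//		}
//		return prev.Step > 0 && next.Step <= prev.Step
//	}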
// a nice idea but probably more trouble than it's worth
func (cs *ConsensusState) stopTimer() {
cs.timeoutTicker.Stop()
@ -637,29 +653,23 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
cs.wal.Save(mi)
// handles proposals, block parts, votes
cs.handleMsg(mi, rs)
case ti := <-cs.tockChan:
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
cs.wal.Save(ti)
// if the timeout is relevant to the rs
// go to the next step
cs.handleTimeout(ti, rs)
case <-cs.Quit:
// drain the internalMsgQueue in case we e.g. signed a proposal but it didn't hit the wal
FLUSH:
for {
select {
case mi = <-cs.internalMsgQueue:
cs.wal.Save(mi)
cs.handleMsg(mi, rs)
default:
break FLUSH
}
}
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but it's ok because
// priv_val tracks LastSig
// close wal now that we're done writing to it
if cs.wal != nil {
cs.wal.Close()
cs.wal.Stop()
}
close(cs.done)
return
}
}
@ -686,7 +696,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
case *VoteMessage:
// attempt to add the vote and dupeout the validator if it's a duplicate signature
// if the vote gives us a 2/3-any or 2/3-one, we transition
err := cs.tryAddVote(msg.ValidatorIndex, msg.Vote, peerKey)
err := cs.tryAddVote(msg.Vote, peerKey)
if err == ErrAddingVote {
// TODO: punish peer
}
@ -721,7 +731,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
switch ti.Step {
case RoundStepNewHeight:
// NewRound event fired from enterNewRound.
// XXX: should we fire timeout here?
// XXX: should we fire timeout here (for timeout commit)?
cs.enterNewRound(ti.Height, 0)
case RoundStepPropose:
types.FireEventTimeoutPropose(cs.evsw, cs.RoundStateEvent())
@ -817,16 +827,16 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
return
}
if !bytes.Equal(cs.Validators.Proposer().Address, cs.privValidator.Address) {
if !bytes.Equal(cs.Validators.Proposer().Address, cs.privValidator.GetAddress()) {
log.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
} else {
log.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
cs.decideProposal(height, round)
}
}
}
func (cs *ConsensusState) decideProposal(height, round int) {
func (cs *ConsensusState) defaultDecideProposal(height, round int) {
var block *types.Block
var blockParts *types.PartSet
@ -843,7 +853,8 @@ func (cs *ConsensusState) decideProposal(height, round int) {
}
// Make proposal
proposal := types.NewProposal(height, round, blockParts.Header(), cs.Votes.POLRound())
polRound, polBlockID := cs.Votes.POLInfo()
proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
err := cs.privValidator.SignProposal(cs.state.ChainID, proposal)
if err == nil {
// Set fields
@ -866,7 +877,6 @@ func (cs *ConsensusState) decideProposal(height, round int) {
log.Warn("enterPropose: Error signing proposal", "height", height, "round", round, "error", err)
}
}
}
// Returns true if the proposal block is complete &&
@ -906,26 +916,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Mempool validated transactions
txs := cs.mempool.Reap(cs.config.GetInt("block_size"))
block = &types.Block{
Header: &types.Header{
ChainID: cs.state.ChainID,
Height: cs.Height,
Time: time.Now(),
NumTxs: len(txs),
LastBlockHash: cs.state.LastBlockHash,
LastBlockParts: cs.state.LastBlockParts,
ValidatorsHash: cs.state.Validators.Hash(),
AppHash: cs.state.AppHash, // state merkle root of txs from the previous block.
},
LastCommit: commit,
Data: &types.Data{
Txs: txs,
},
}
block.FillHeader()
blockParts = block.MakePartSet()
return block, blockParts
return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.GetInt("block_part_size"))
}
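// (types.MakeBlock folds together what the deleted lines above did by hand:
// build the header, FillHeader(), and MakePartSet(), with the part size now
// read from config as block_part_size.)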
// Enter: `timeoutPropose` after entering Propose.
@ -964,10 +956,10 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// (so we have more time to try and collect +2/3 prevotes for a single block)
}
func (cs *ConsensusState) doPrevote(height int, round int) {
func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
log.Info("enterPrevote: Block was locked")
log.Notice("enterPrevote: Block was locked")
cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
return
}
@ -1038,14 +1030,14 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.newStep()
}()
hash, partsHeader, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
// If we don't have a polka, we must precommit nil
if !ok {
if cs.LockedBlock != nil {
log.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
} else {
log.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
@ -1055,12 +1047,13 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
types.FireEventPolka(cs.evsw, cs.RoundStateEvent())
// the latest POLRound should be this round
if cs.Votes.POLRound() < round {
PanicSanity(Fmt("This POLRound should be %v but got %", round, cs.Votes.POLRound()))
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
PanicSanity(Fmt("This POLRound should be %v but got %", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
if len(hash) == 0 {
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
log.Notice("enterPrecommit: +2/3 prevoted for nil.")
} else {
@ -1077,17 +1070,17 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// At this point, +2/3 prevoted for a particular block.
// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(hash) {
if cs.LockedBlock.HashesTo(blockID.Hash) {
log.Notice("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
types.FireEventRelock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, hash, partsHeader)
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
return
}
// If +2/3 prevoted for proposal block, stage and precommit it
if cs.ProposalBlock.HashesTo(hash) {
log.Notice("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", hash)
if cs.ProposalBlock.HashesTo(blockID.Hash) {
log.Notice("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.state.ValidateBlock(cs.ProposalBlock); err != nil {
PanicConsensus(Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
@ -1096,7 +1089,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.LockedBlock = cs.ProposalBlock
cs.LockedBlockParts = cs.ProposalBlockParts
types.FireEventLock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, hash, partsHeader)
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
return
}
@ -1107,9 +1100,9 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
if !cs.ProposalBlockParts.HasHeader(partsHeader) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(partsHeader)
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
}
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
@ -1151,13 +1144,14 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
// keep cs.Round the same, commitRound points to the right Precommits set.
cs.updateRoundStep(cs.Round, RoundStepCommit)
cs.CommitRound = commitRound
cs.CommitTime = time.Now()
cs.newStep()
// Maybe finalize immediately.
cs.tryFinalizeCommit(height)
}()
hash, partsHeader, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
if !ok {
PanicSanity("RunActionCommit() expects +2/3 precommits")
}
@ -1165,18 +1159,18 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
// The Locked* fields no longer matter.
// Move them over to ProposalBlock if they match the commit hash,
// otherwise they'll be cleared in updateToState.
if cs.LockedBlock.HashesTo(hash) {
if cs.LockedBlock.HashesTo(blockID.Hash) {
cs.ProposalBlock = cs.LockedBlock
cs.ProposalBlockParts = cs.LockedBlockParts
}
// If we don't have the block being committed, set up to get it.
if !cs.ProposalBlock.HashesTo(hash) {
if !cs.ProposalBlockParts.HasHeader(partsHeader) {
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
// We're getting the wrong block.
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(partsHeader)
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
} else {
// We just need to keep waiting.
}
@ -1189,12 +1183,12 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) {
PanicSanity(Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
hash, _, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(hash) == 0 {
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
log.Warn("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
return
}
if !cs.ProposalBlock.HashesTo(hash) {
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
log.Warn("Attempt to finalize failed. We don't have the commit block.")
@ -1211,66 +1205,67 @@ func (cs *ConsensusState) finalizeCommit(height int) {
return
}
hash, header, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
if !ok {
PanicSanity(Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
}
if !blockParts.HasHeader(header) {
if !blockParts.HasHeader(blockID.PartsHeader) {
PanicSanity(Fmt("Expected ProposalBlockParts header to be commit header"))
}
if !block.HashesTo(hash) {
if !block.HashesTo(blockID.Hash) {
PanicSanity(Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
}
if err := cs.state.ValidateBlock(block); err != nil {
PanicConsensus(Fmt("+2/3 committed an invalid block: %v", err))
}
log.Notice(Fmt("Finalizing commit of block with %d txs", block.NumTxs), "height", block.Height, "hash", block.Hash())
log.Notice(Fmt("Finalizing commit of block with %d txs", block.NumTxs),
"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
log.Info(Fmt("%v", block))
// Fire off event for new block.
// TODO: Handle app failure. See #177
types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header})
fail.Fail() // XXX
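// (fail.Fail() is a crash point from github.com/ebuchman/fail-test: in
// failure-testing builds the process can be made to exit at a chosen call
// site, so WAL/handshake recovery can be exercised between commit stages.)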
// Save to blockStore.
if cs.blockStore.Height() < block.Height {
// NOTE: the seenCommit is local justification to commit this block,
// but may differ from the LastCommit included in the next block
precommits := cs.Votes.Precommits(cs.CommitRound)
seenCommit := precommits.MakeCommit()
cs.blockStore.SaveBlock(block, blockParts, seenCommit)
} else {
log.Warn("Why are we finalizeCommitting a block height we already have?", "height", block.Height)
}
fail.Fail() // XXX
// Create a copy of the state for staging
// and an event cache for txs
stateCopy := cs.state.Copy()
// event cache for txs
eventCache := types.NewEventCache(cs.evsw)
// Run the block on the State:
// + update validator sets
// + run txs on the proxyAppConn
err := stateCopy.ExecBlock(eventCache, cs.proxyAppConn, block, blockParts.Header())
// Execute and commit the block, and update the mempool.
// All calls to the proxyAppConn should come here.
// NOTE: the block.AppHash won't reflect these txs until the next block
err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
if err != nil {
// TODO: handle this gracefully.
PanicQ(Fmt("Exec failed for application: %v", err))
// TODO!
}
// lock mempool, commit state, update mempool
err = cs.commitStateUpdateMempool(stateCopy, block)
if err != nil {
// TODO: handle this gracefully.
PanicQ(Fmt("Commit failed for application: %v", err))
}
fail.Fail() // XXX
// txs committed, bad ones removed from mempool; fire events
// NOTE: the block.AppHash won't reflect these txs until the next block
// Fire off event for new block.
// TODO: Handle app failure. See #177
types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header})
eventCache.Flush()
// Save to blockStore.
if cs.blockStore.Height() < block.Height {
precommits := cs.Votes.Precommits(cs.CommitRound)
seenCommit := precommits.MakeCommit()
cs.blockStore.SaveBlock(block, blockParts, seenCommit)
}
// Save the state.
stateCopy.Save()
fail.Fail() // XXX
// NewHeightStep!
cs.updateToState(stateCopy)
@ -1285,36 +1280,11 @@ func (cs *ConsensusState) finalizeCommit(height int) {
return
}
// mempool must be locked during commit and update
// because state is typically reset on Commit and old txs must be replayed
// against committed state before new txs are run in the mempool, lest they be invalid
func (cs *ConsensusState) commitStateUpdateMempool(s *sm.State, block *types.Block) error {
cs.mempool.Lock()
defer cs.mempool.Unlock()
// Commit block, get hash back
res := cs.proxyAppConn.CommitSync()
if res.IsErr() {
log.Warn("Error in proxyAppConn.CommitSync", "error", res)
return res
}
if res.Log != "" {
log.Debug("Commit.Log: " + res.Log)
}
// Set the state's new AppHash
s.AppHash = res.Data
// Update mempool.
cs.mempool.Update(block.Height, block.Txs)
return nil
}
//-----------------------------------------------------------------------------
func (cs *ConsensusState) setProposal(proposal *types.Proposal) error {
func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
// Already have one
// TODO: possibly catch double proposals
if cs.Proposal != nil {
return nil
}
@ -1382,15 +1352,15 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver
}
// Attempt to add the vote. If it's a duplicate signature, dupeout the validator
func (cs *ConsensusState) tryAddVote(valIndex int, vote *types.Vote, peerKey string) error {
_, _, err := cs.addVote(valIndex, vote, peerKey)
func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
_, err := cs.addVote(vote, peerKey)
if err != nil {
// If the vote height is off, we'll just ignore it,
// But if it's a conflicting sig, broadcast evidence tx for slashing.
// If it's otherwise invalid, punish peer.
if err == ErrVoteHeightMismatch {
return err
} else if _, ok := err.(*types.ErrVoteConflictingSignature); ok {
} else if _, ok := err.(*types.ErrVoteConflictingVotes); ok {
if peerKey == "" {
log.Warn("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
return err
@ -1416,31 +1386,39 @@ func (cs *ConsensusState) tryAddVote(valIndex int, vote *types.Vote, peerKey str
//-----------------------------------------------------------------------------
func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, err error) {
log.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "csHeight", cs.Height)
// A precommit for the previous height?
// These come in while we wait for timeoutCommit
if vote.Height+1 == cs.Height {
if !(cs.Step == RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) {
// TODO: give the reason ..
// fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.")
return added, nil, ErrVoteHeightMismatch
return added, ErrVoteHeightMismatch
}
added, address, err = cs.LastCommit.AddByIndex(valIndex, vote)
added, err = cs.LastCommit.AddVote(vote)
if added {
log.Info(Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
types.FireEventVote(cs.evsw, types.EventDataVote{valIndex, address, vote})
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.timeoutParams.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
}
return
}
// A prevote/precommit for this height?
if vote.Height == cs.Height {
height := cs.Height
added, address, err = cs.Votes.AddByIndex(valIndex, vote, peerKey)
added, err = cs.Votes.AddVote(vote, peerKey)
if added {
types.FireEventVote(cs.evsw, types.EventDataVote{valIndex, address, vote})
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
switch vote.Type {
case types.VoteTypePrevote:
@ -1452,8 +1430,8 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
// there.
if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
hash, _, ok := prevotes.TwoThirdsMajority()
if ok && !cs.LockedBlock.HashesTo(hash) {
blockID, ok := prevotes.TwoThirdsMajority()
if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
log.Notice("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
@ -1479,20 +1457,27 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
log.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
hash, _, ok := precommits.TwoThirdsMajority()
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(hash) == 0 {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)
if cs.timeoutParams.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
//}()
}
default:
PanicSanity(Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
@ -1510,12 +1495,15 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
}
func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
addr := cs.privValidator.GetAddress()
valIndex, _ := cs.Validators.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: valIndex,
Height: cs.Height,
Round: cs.Round,
Type: type_,
BlockHash: hash,
BlockPartsHeader: header,
BlockID: types.BlockID{hash, header},
}
err := cs.privValidator.SignVote(cs.state.ChainID, vote)
return vote, err
@ -1523,21 +1511,19 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet
// sign the vote and publish on internalMsgQueue
func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.Address) {
// if we don't have a key or we're not in the validator set, do nothing
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
return nil
}
vote, err := cs.signVote(type_, hash, header)
if err == nil {
// TODO: store our index in the cs so we don't have to do this every time
valIndex, _ := cs.Validators.GetByAddress(cs.privValidator.Address)
cs.sendInternalMessage(msgInfo{&VoteMessage{valIndex, vote}, ""})
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
log.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
return vote
} else {
if !cs.replayMode {
log.Warn("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
}
//if !cs.replayMode {
log.Warn("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
//}
return nil
}
}


+144  -111   consensus/state_test.go

@ -30,7 +30,7 @@ x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevot
FullRoundSuite
x * TestFullRound1 - 1 val, full successful round
x * TestFullRoundNil - 1 val, full round of nil
x * TestFullRound2 - 2 vals, both required for fuill round
x * TestFullRound2 - 2 vals, both required for full round
LockSuite
x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
@ -66,15 +66,15 @@ func TestProposerSelection0(t *testing.T) {
// let's commit a block and ensure proposer for the next height is correct
prop := cs1.GetRoundState().Validators.Proposer()
if !bytes.Equal(prop.Address, cs1.privValidator.Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", 0, prop.Address))
if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
}
// wait for complete proposal
<-proposalCh
rs := cs1.GetRoundState()
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, vss[1:]...)
signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
// wait for new round so next validator is set
<-newRoundCh
@ -106,7 +106,7 @@ func TestProposerSelection2(t *testing.T) {
}
rs := cs1.GetRoundState()
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, rs.ProposalBlockParts.Header(), nil, vss[1:]...)
signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
<-newRoundCh // wait for the new round event each round
incrementRound(vss[1:]...)
@ -179,12 +179,14 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
func TestBadProposal(t *testing.T) {
cs1, vss := randConsensusState(2)
height, round := cs1.Height, cs1.Round
cs2 := vss[1]
vs2 := vss[1]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, cs2)
propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)
// make the second validator the proposer by incrementing round
round = round + 1
@ -197,10 +199,10 @@ func TestBadProposal(t *testing.T) {
}
stateHash[0] = byte((stateHash[0] + 1) % 255)
propBlock.AppHash = stateHash
propBlockParts := propBlock.MakePartSet()
proposal := types.NewProposal(cs2.Height, round, propBlockParts.Header(), -1)
if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic("failed to sign bad proposal: " + err.Error())
propBlockParts := propBlock.MakePartSet(partSize)
proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
if err := vs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
@ -217,14 +219,15 @@ func TestBadProposal(t *testing.T) {
validatePrevote(t, cs1, round, vss[0], nil)
// add bad prevote from cs2 and wait for it
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
// add bad prevote from vs2 and wait for it
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// wait for precommit
<-voteCh
validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
//----------------------------------------------------------------------------------------------------
@ -281,7 +284,7 @@ func TestFullRoundNil(t *testing.T) {
// where the first validator has to wait for votes from the second
func TestFullRound2(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@ -296,8 +299,9 @@ func TestFullRound2(t *testing.T) {
rs := cs1.GetRoundState()
propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()
// prevote arrives from cs2:
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
// prevote arrives from vs2:
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
<-voteCh
<-voteCh //precommit
@ -306,8 +310,9 @@ func TestFullRound2(t *testing.T) {
// we should be stuck in limbo waiting for more precommits
// precommit arrives from cs2:
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
// precommit arrives from vs2:
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
<-voteCh
// wait to finish commit, propose in next height
<-newBlockCh
@ -320,9 +325,11 @@ func TestFullRound2(t *testing.T) {
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestLockNoPOL(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
height := cs1.Height
partSize := config.GetInt("block_part_size")
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@ -344,8 +351,9 @@ func TestLockNoPOL(t *testing.T) {
<-voteCh // prevote
// we should now be stuck in limbo forever, waiting for more prevotes
// prevote arrives from cs2:
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), voteCh)
// prevote arrives from vs2:
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
<-voteCh // prevote
<-voteCh // precommit
@ -358,7 +366,8 @@ func TestLockNoPOL(t *testing.T) {
hash := make([]byte, len(theBlockHash))
copy(hash, theBlockHash)
hash[0] = byte((hash[0] + 1) % 255)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh // precommit
// (note we're entering precommit for a second time this round)
// but with invalid args. then we enterPrecommitWait, and the timeout to new round
@ -372,7 +381,7 @@ func TestLockNoPOL(t *testing.T) {
Round2 (cs1, B) // B B2
*/
incrementRound(cs2)
incrementRound(vs2)
// now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh
@ -389,7 +398,8 @@ func TestLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// now we're going to enter prevote again, but with invalid args
// and then prevote wait, which should timeout. then wait for precommit
@ -401,9 +411,10 @@ func TestLockNoPOL(t *testing.T) {
// we should precommit nil and be locked on the proposal
validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)
// add conflicting precommit from cs2
// add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// (note we're entering precommit for a second time this round, but with invalid args
// then we enterPrecommitWait and timeout into NewRound
@ -412,10 +423,10 @@ func TestLockNoPOL(t *testing.T) {
<-newRoundCh
log.Notice("#### ONTO ROUND 2")
/*
Round3 (cs2, _) // B, B2
Round3 (vs2, _) // B, B2
*/
incrementRound(cs2)
incrementRound(vs2)
re = <-proposalCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
@ -429,33 +440,36 @@ func TestLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
<-timeoutWaitCh // prevote wait
<-voteCh // precommit
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
<-voteCh
<-timeoutWaitCh
// before we time out into new round, set next proposal block
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
if prop == nil || propBlock == nil {
panic("Failed to create proposal block with cs2")
t.Fatal("Failed to create proposal block with vs2")
}
incrementRound(cs2)
incrementRound(vs2)
<-newRoundCh
log.Notice("#### ONTO ROUND 3")
/*
Round4 (cs2, C) // B C // B C
Round4 (vs2, C) // B C // B C
*/
// now we're on a new round and not the proposer
// so set the proposal block
cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(), "")
cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "")
<-proposalCh
<-voteCh // prevote
@ -463,19 +477,24 @@ func TestLockNoPOL(t *testing.T) {
// prevote for locked block (not proposal)
validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
<-timeoutWaitCh
<-voteCh
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
<-voteCh
}
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@ -484,14 +503,14 @@ func TestLockPOLRelock(t *testing.T) {
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
log.Debug("cs2 last round", "lr", cs2.PrivValidator.LastRound)
log.Debug("vs2 last round", "lr", vs2.PrivValidator.LastRound)
// everything done from perspective of cs1
/*
Round1 (cs1, B) // B B B B// B nil B nil
eg. cs2 and cs4 didn't see the 2/3 prevotes
eg. vs2 and vs4 didn't see the 2/3 prevotes
*/
// start round and wait for propose and prevote
@ -501,26 +520,27 @@ func TestLockPOLRelock(t *testing.T) {
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
theBlockHash := rs.ProposalBlock.Hash()
theBlockPartsHeader := rs.ProposalBlockParts.Header()
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, theBlockHash, theBlockPartsHeader, voteCh, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
<-voteCh // our precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, voteCh, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, theBlockHash, theBlockPartsHeader, voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // precommits
// before we timeout to the new round set the new proposal
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
propBlockParts := propBlock.MakePartSet()
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockParts := propBlock.MakePartSet(partSize)
propBlockHash := propBlock.Hash()
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
<-timeoutWaitCh
@ -532,7 +552,7 @@ func TestLockPOLRelock(t *testing.T) {
log.Notice("### ONTO ROUND 1")
/*
Round2 (cs2, C) // B C C C // C C C _)
Round2 (vs2, C) // B C C C // C C C _)
cs1 changes lock!
*/
@ -550,7 +570,8 @@ func TestLockPOLRelock(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], theBlockHash)
// now let's add prevotes from everyone else for the new block
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// now either we go to PrevoteWait or Precommit
select {
@ -564,7 +585,8 @@ func TestLockPOLRelock(t *testing.T) {
// we should have unlocked and locked on the new block
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
_, _ = <-voteCh, <-voteCh
be := <-newBlockCh
b := be.(types.EventDataNewBlockHeader)
@ -582,14 +604,16 @@ func TestLockPOLRelock(t *testing.T) {
// 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
func TestLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// everything done from perspective of cs1
@ -608,7 +632,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
<-voteCh //precommit
@ -618,14 +642,14 @@ func TestLockPOLUnlock(t *testing.T) {
rs = cs1.GetRoundState()
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
// before we time out into new round, set next proposal block
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
propBlockParts := propBlock.MakePartSet()
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockParts := propBlock.MakePartSet(partSize)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
re = <-timeoutWaitCh
@ -638,7 +662,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-newRoundCh
log.Notice("#### ONTO ROUND 1")
/*
Round2 (cs2, C) // B nil nil nil // nil nil nil _
Round2 (vs2, C) // B nil nil nil // nil nil nil _
cs1 unlocks!
*/
@ -655,7 +679,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-voteCh
validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)
// now let's add prevotes from everyone else for nil (a polka!)
signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)
// the polka makes us unlock and precommit nil
<-unlockCh
@ -665,7 +689,7 @@ func TestLockPOLUnlock(t *testing.T) {
// NOTE: since we don't relock on nil, the lock round is 0
validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
<-newRoundCh
}
@ -675,13 +699,15 @@ func TestLockPOLUnlock(t *testing.T) {
// then we see the polka from round 1 but shouldn't unlock
func TestLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@ -695,7 +721,7 @@ func TestLockPOLSafety1(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())
// the others sign a polka but we don't see it
prevotes := signVoteMany(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet().Header(), cs2, cs3, cs4)
prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)
// before we time out into new round, set next proposer
// and next proposal block
@ -709,13 +735,13 @@ func TestLockPOLSafety1(t *testing.T) {
log.Warn("old prop", "hash", fmt.Sprintf("%X", propBlock.Hash()))
// we do see them precommit nil
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockHash := propBlock.Hash()
propBlockParts := propBlock.MakePartSet()
propBlockParts := propBlock.MakePartSet(partSize)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
@ -746,18 +772,18 @@ func TestLockPOLSafety1(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], propBlockHash)
// now we see the others prevote for it, so we should lock on it
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
<-voteCh // precommit
// we should have precommitted
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
<-timeoutWaitCh
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
<-newRoundCh
@ -778,7 +804,7 @@ func TestLockPOLSafety1(t *testing.T) {
newStepCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRoundStep(), 1)
// add prevotes from the earlier round
addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4)
addVotes(cs1, prevotes...)
log.Warn("Done adding prevotes!")
@ -794,30 +820,33 @@ func TestLockPOLSafety1(t *testing.T) {
// don't see P0, lock on P1 at R1, don't unlock using P0 at R2
func TestLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// the block for R0: gets polkad but we miss it
// (even though we signed it, shhh)
_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
propBlockHash0 := propBlock0.Hash()
propBlockParts0 := propBlock0.MakePartSet()
propBlockParts0 := propBlock0.MakePartSet(partSize)
// the others sign a polka but we don't see it
prevotes := signVoteMany(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), cs2, cs3, cs4)
prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)
// the block for round 1
prop1, propBlock1 := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockHash1 := propBlock1.Hash()
propBlockParts1 := propBlock1.MakePartSet()
propBlockParts1 := propBlock1.MakePartSet(partSize)
propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
cs1.updateRoundStep(0, RoundStepPrecommitWait)
@ -832,28 +861,30 @@ func TestLockPOLSafety2(t *testing.T) {
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash1, propBlockParts1.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
<-voteCh // precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlockHash1, propBlockParts1.Header(), nil)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout of precommit wait to new round
<-timeoutWaitCh
// in round 2 we see the polkad block from round 0
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0)
if err := cs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
panic(err)
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
if err := vs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
t.Fatal(err)
}
cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4) // add the pol votes
// Add the pol votes
addVotes(cs1, prevotes...)
<-newRoundCh
log.Notice("### ONTO Round 2")
@ -884,13 +915,13 @@ func TestLockPOLSafety2(t *testing.T) {
/*
func TestSlashingPrevotes(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@ -904,7 +935,7 @@ func TestSlashingPrevotes(t *testing.T) {
// adding one for a different block should cause us to go into prevote wait
hash := rs.ProposalBlock.Hash()
hash[0] = byte(hash[0]+1) % 255
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlockParts.Header(), nil)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)
<-timeoutWaitCh
@ -912,20 +943,20 @@ func TestSlashingPrevotes(t *testing.T) {
// away and ignore more prevotes (and thus fail to slash!)
// add the conflicting vote
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),nil)
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
// XXX: Check for existence of Dupeout info
}
func TestSlashingPrecommits(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@ -933,8 +964,8 @@ func TestSlashingPrecommits(t *testing.T) {
re := <-proposalCh
<-voteCh // prevote
// add prevote from cs2
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
// add prevote from vs2
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
<-voteCh // precommit
@ -942,13 +973,13 @@ func TestSlashingPrecommits(t *testing.T) {
// add one for a different block should cause us to go into prevote wait
hash := rs.ProposalBlock.Hash()
hash[0] = byte(hash[0]+1) % 255
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlockParts.Header(),nil)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
// NOTE: we have to send the vote for different block first so we don't just go into precommit round right
// away and ignore more prevotes (and thus fail to slash!)
// add precommit from cs2
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),nil)
// add precommit from vs2
signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
// XXX: Check for existence of Dupeout info
}
@ -964,13 +995,15 @@ func TestSlashingPrecommits(t *testing.T) {
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@ -978,23 +1011,23 @@ func TestHalt1(t *testing.T) {
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet()
propBlockParts := propBlock.MakePartSet(partSize)
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlock.Hash(), propBlockParts.Header(), nil, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
<-voteCh // precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())
// add precommits from the rest
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, nil, types.PartSetHeader{}, nil) // didn't receive proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlock.Hash(), propBlockParts.Header(), nil)
// we receive this later, but cs3 might receive it earlier and with ours will go to commit!
precommit4 := signVote(cs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didn't receive proposal
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
<-timeoutWaitCh
@ -1012,7 +1045,7 @@ func TestHalt1(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())
// now we receive the precommit from the previous round
addVoteToFrom(cs1, cs4, precommit4)
addVotes(cs1, precommit4)
// receiving that precommit should take us straight to commit
<-newBlockCh


+ 5
- 5
consensus/test_data/README.md

@ -1,8 +1,9 @@
# Generating test data
The easiest way to generate this data is to copy `~/.tendermint_test/somedir/*` to `~/.tendermint`
and to run a local node.
Be sure to set the db to "leveldb" to create a cswal file in `~/.tendermint/data/cswal`.
To generate the data, run `build.sh`. See that script for more details.
Make sure to adjust the stepChanges in the testCases if the number of messages changes.
This sometimes happens for the `small_block2.cswal`, where the number of block parts changes between 4 and 5.
If you need to change the signatures, you can use a script as follows:
The privBytes comes from `config/tendermint_test/...`:
@ -29,8 +30,7 @@ func main() {
privKey := crypto.PrivKeyEd25519{}
copy(privKey[:], privBytes)
signature := privKey.Sign(signBytes)
signatureEd25519 := signature.(crypto.SignatureEd25519)
fmt.Printf("Signature Bytes: %X\n", signatureEd25519[:])
fmt.Printf("Signature Bytes: %X\n", signature.Bytes())
}
```
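For reference, a self-contained version of that snippet might look like this (a sketch, not part of the diff; `privBytes` and `signBytes` are the inputs described above and elided here):

```go
package main

import (
	"fmt"

	"github.com/tendermint/go-crypto"
)

func main() {
	// privBytes comes from config/tendermint_test/...; signBytes is the
	// canonical sign-bytes of the message being re-signed (both elided).
	var privBytes, signBytes []byte

	privKey := crypto.PrivKeyEd25519{}
	copy(privKey[:], privBytes)
	signature := privKey.Sign(signBytes)
	fmt.Printf("Signature Bytes: %X\n", signature.Bytes())
}
```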

+ 58
- 0
consensus/test_data/build.sh

@ -0,0 +1,58 @@
#! /bin/bash
cd $GOPATH/src/github.com/tendermint/tendermint
# specify a dir to copy
# NOTE: eventually we should replace with `tendermint init --test`
DIR=$HOME/.tendermint_test/consensus_state_test
# XXX: remove tendermint dir
rm -rf $HOME/.tendermint
cp -r $DIR $HOME/.tendermint
function reset(){
rm -rf $HOME/.tendermint/data
tendermint unsafe_reset_priv_validator
}
reset
# empty block
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
# /q would print up to and including the match, then quit.
# /Q doesn't include the match.
# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
reset
# small block 1
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal
reset
# small block 2 (part size = 512)
echo "" >> ~/.tendermint/config.toml
echo "block_part_size = 512" >> ~/.tendermint/config.toml
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal
reset

+ 10
- 8
consensus/test_data/empty_block.cswal

@ -1,8 +1,10 @@
{"time":"2016-04-03T11:23:54.387Z","msg":[3,{"duration":972835254,"height":1,"round":0,"step":1}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"pol_round":-1,"signature":"3A2ECD5023B21EC144EC16CFF1B992A4321317B83EEDD8969FDFEA6EB7BF4389F38DDA3E7BB109D63A07491C16277A197B241CF1F05F5E485C59882ECACD9E07"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.389Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011441D59F4B718AC00000000000000114C4B01D3810579550997AC5641E759E20D99B51C10001000100","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"47D2A75A4E2F15DB1F0D1B656AC0637AF9AADDFEB6A156874F6553C73895E5D5DC948DBAEF15E61276C5342D0E638DFCB77C971CD282096EA8735A564A90F008"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"39147DA595F08B73CF8C899967C8403B5872FD9042FFA4E239159E0B6C5D9665C9CA81D766EACA2AE658872F94C2FCD1E34BF51859CD5B274DA8512BACE4B50D"}}],"peer_key":""}]}
#HEIGHT: 1
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.506Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114914148D83E0DC00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"B64D0BB64B2E9AAFDD4EBEA679644F77AE774D69E3E2E1B042AB15FE4F84B1427AC6C8A25AFF58EA22011AE567FEA49D2EE7354382E915AD85BF40C58FA6130C"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"D83E968392D1BF09821E0D05079DAB5491CABD89BE128BD1CF573ED87148BA84667A56C0A069EFC90760F25EDAC62BC324DBB12EA63F44E6CB2D3500FE5E640F"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

+ 10
- 8
consensus/test_data/small_block1.cswal
File diff suppressed because it is too large


+ 14
- 10
consensus/test_data/small_block2.cswal

@ -1,10 +1,14 @@
{"time":"2016-10-11T16:21:23.438Z","msg":[3,{"duration":0,"height":1,"round":0,"step":1}]}
{"time":"2016-10-11T16:21:23.440Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-10-11T16:21:23.440Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"pol_round":-1,"signature":"8F74F7032E50DFBC17E8B42DD15FD54858B45EEB1B8DAF6432AFBBB1333AC1E850290DE82DF613A10430EB723023527498D45C106FD2946FEF03A9C8B301020B"}}],"peer_key":""}]}
{"time":"2016-10-11T16:21:23.440Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F746573740101147C86B383BAB78001A60000000001148A3835062BB5E79BE490FAB65168D69BD716AD530114C4B01D3810579550997AC5641E759E20D99B51C1000101A6010F616263643139363D64636261313936010F616263643139373D64636261313937010F616263643139383D64636261313938010F616263643139393D64636261313939010F616263643230303D64636261323030010F616263643230313D64636261323031010F616263643230323D64636261323032010F616263643230333D64636261323033010F616263643230343D64636261323034010F616263643230353D64636261323035010F616263643230363D64636261323036010F616263643230373D64636261323037010F616263643230383D64636261323038010F616263643230393D64636261323039010F616263643231303D64636261323130010F616263643231313D64636261323131010F616263643231323D64636261323132010F616263643231333D64636261323133010F616263643231343D64636261323134010F616263643231353D64636261323135010F616263643231363D64636261323136010F616263643231373D64636261323137010F616263643231383D64636261323138010F616263643231393D64636261323139010F616263643232303D64636261323230010F616263643232313D64636261323231010F616263643232323D64636261323232010F616263643232333D64636261323233010F616263643232343D64636261323234010F616263643232353D64636261323235010F616263643232363D64636261323236010F616263643232373D64636261323237010F616263643232383D64636261323238010F616263643232393D64636261323239010F616263643233303D64636261323330010F616263643233313D64636261323331010F616263643233323D64636261323332010F616263643233333D64636261323333010F616263643233343D64636261323334010F616263643233353D64636261323335010F616263643233363D64636261323336010F616263643233373D64636261323337010F616263643233383D64636261323338010F616263643233393D64636261323339010F616263643234303D64636261323430010F616263643234313D64636261323431010F616263643234323D64636261323432010F616263643234333D64636261323433010F616263643234343D64636261323434010F616263643234353D64636261323435010F616263643234363D64636261323436010F616263643234373D64636261323437010F616263643234383D64636261323438010F616263643234393D64636261323439010F616263643235303D64636261323530010F61626364","proof":{"aunts":["22516491F7E1B5ADD8F12B309E9E8F6F04C034AB","C65A9589F377F2B6CF44B9BAFEBB535DF3C3A4FB"]}}}],"peer_key":""}]}
{"time":"2016-10-11T16:21:23.441Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"3235313D64636261323531010F616263643235323D64636261323532010F616263643235333D64636261323533010F616263643235343D64636261323534010F616263643235353D64636261323535010F616263643235363D64636261323536010F616263643235373D64636261323537010F616263643235383D64636261323538010F616263643235393D64636261323539010F616263643236303D64636261323630010F616263643236313D64636261323631010F616263643236323D64636261323632010F616263643236333D64636261323633010F616263643236343D64636261323634010F616263643236353D64636261323635010F616263643236363D64636261323636010F616263643236373D64636261323637010F616263643236383D64636261323638010F616263643236393D64636261323639010F616263643237303D64636261323730010F616263643237313D64636261323731010F616263643237323D64636261323732010F616263643237333D64636261323733010F616263643237343D64636261323734010F616263643237353D64636261323735010F616263643237363D64636261323736010F616263643237373D64636261323737010F616263643237383D64636261323738010F616263643237393D64636261323739010F616263643238303D64636261323830010F616263643238313D64636261323831010F616263643238323D64636261323832010F616263643238333D64636261323833010F616263643238343D64636261323834010F616263643238353D64636261323835010F616263643238363D64636261323836010F616263643238373D64636261323837010F616263643238383D64636261323838010F616263643238393D64636261323839010F616263643239303D64636261323930010F616263643239313D64636261323931010F616263643239323D64636261323932010F616263643239333D64636261323933010F616263643239343D64636261323934010F616263643239353D64636261323935010F616263643239363D64636261323936010F616263643239373D64636261323937010F616263643239383D64636261323938010F616263643239393D64636261323939010F616263643330303D64636261333030010F616263643330313D64636261333031010F616263643330323D64636261333032010F616263643330333D64636261333033010F616263643330343D64636261333034010F616263643330353D64636261333035010F616263643330363D64636261333036010F616263643330373D64636261333037010F616263643330383D64636261333038010F616263643330393D64636261333039010F616263643331303D64636261333130010F616263643331313D","proof":{"aunts":["F730990451BAB63C3CF6AC8E6ED4F52259CA5F53","C65A9589F377F2B6CF44B9BAFEBB535DF3C3A4FB"]}}}],"peer_key":""}]}
{"time":"2016-10-11T16:21:23.441Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"64636261333131010F616263643331323D64636261333132010F616263643331333D64636261333133010F616263643331343D64636261333134010F616263643331353D64636261333135010F616263643331363D64636261333136010F616263643331373D64636261333137010F616263643331383D64636261333138010F616263643331393D64636261333139010F616263643332303D64636261333230010F616263643332313D64636261333231010F616263643332323D64636261333232010F616263643332333D64636261333233010F616263643332343D64636261333234010F616263643332353D64636261333235010F616263643332363D64636261333236010F616263643332373D64636261333237010F616263643332383D64636261333238010F616263643332393D64636261333239010F616263643333303D64636261333330010F616263643333313D64636261333331010F616263643333323D64636261333332010F616263643333333D64636261333333010F616263643333343D64636261333334010F616263643333353D64636261333335010F616263643333363D64636261333336010F616263643333373D64636261333337010F616263643333383D64636261333338010F616263643333393D64636261333339010F616263643334303D64636261333430010F616263643334313D64636261333431010F616263643334323D64636261333432010F616263643334333D64636261333433010F616263643334343D64636261333434010F616263643334353D64636261333435010F616263643334363D64636261333436010F616263643334373D64636261333437010F616263643334383D64636261333438010F616263643334393D64636261333439010F616263643335303D64636261333530010F616263643335313D64636261333531010F616263643335323D64636261333532010F616263643335333D64636261333533010F616263643335343D64636261333534010F616263643335353D64636261333535010F616263643335363D64636261333536010F616263643335373D64636261333537010F616263643335383D64636261333538010F616263643335393D64636261333539010F616263643336303D64636261333630010F616263643336313D646362613336310100","proof":{"aunts":["56EF782EE04E0359D0B38271FD22B312A546FC3A"]}}}],"peer_key":""}]}
{"time":"2016-10-11T16:21:23.447Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-10-11T16:21:23.447Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"AAE0ECF64D818A61F6E3D6D11E60F343C3FC8800","block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"signature":"0870A9C3FF59DE0F5574B77F030BD160C1E2966AECE815E7C97CFA8BC4A6B01D7A10D91416B1AA02D49EFF7F08A239048CD9CD93E7AE4F80871FBFFF7DBFC50C"}}],"peer_key":""}]}
{"time":"2016-10-11T16:21:23.448Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-10-11T16:21:23.448Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"AAE0ECF64D818A61F6E3D6D11E60F343C3FC8800","block_parts_header":{"total":3,"hash":"88BC082C86DED0A5E2BBC3677B610D155FEDBCEA"},"signature":"0CEEA8A987D88D0A0870C0076DB8D1B57D3B051D017745B46C4710BBE6DF0F9AE8D5A95B49E4158A1A8C8C6475B8A8E91275303B9C10A5C0C18F40EBB0DA0905"}}],"peer_key":""}]}
#HEIGHT: 1
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011491414B3483A8400190000000000114926EA77D30A4D19866159DE7E58AA9461F90F9D10114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010190010D6162636431323D646362613132010D6162636431333D646362613133010D6162636431343D646362613134010D6162636431353D646362613135010D6162636431363D646362613136010D6162636431373D646362613137010D6162636431383D646362613138010D6162636431393D646362613139010D6162636432303D646362613230010D6162636432313D646362613231010D6162636432323D646362613232010D6162636432333D646362613233010D6162636432343D646362613234010D6162636432353D646362613235010D6162636432363D646362613236010D6162636432373D646362613237010D6162636432383D646362613238010D6162636432393D646362613239010D6162636433303D646362613330010D6162636433313D646362613331010D6162636433323D646362613332010D6162636433333D646362613333010D6162636433343D646362613334010D6162636433353D646362613335010D6162636433363D646362613336010D6162636433373D646362613337010D6162636433383D646362613338010D6162636433393D646362613339010D6162636434303D","proof":{"aunts":["C9FBD66B63A976638196323F5B93494BDDFC9EED","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"646362613430010D6162636434313D646362613431010D6162636434323D646362613432010D6162636434333D646362613433010D6162636434343D646362613434010D6162636434353D646362613435010D6162636434363D646362613436010D6162636434373D646362613437010D6162636434383D646362613438010D6162636434393D646362613439010D6162636435303D646362613530010D6162636435313D646362613531010D6162636435323D646362613532010D6162636435333D646362613533010D6162636435343D646362613534010D6162636435353D646362613535010D6162636435363D646362613536010D6162636435373D646362613537010D6162636435383D646362613538010D6162636435393D646362613539010D6162636436303D646362613630010D6162636436313D646362613631010D6162636436323D646362613632010D6162636436333D646362613633010D6162636436343D646362613634010D6162636436353D646362613635010D6162636436363D646362613636010D6162636436373D646362613637010D6162636436383D646362613638010D6162636436393D646362613639010D6162636437303D646362613730010D6162636437313D646362613731010D6162636437323D646362613732010D6162636437333D646362613733010D6162636437343D6463","proof":{"aunts":["D7FB03B935B77C322064F8277823CDB5C7018597","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"62613734010D6162636437353D646362613735010D6162636437363D646362613736010D6162636437373D646362613737010D6162636437383D646362613738010D6162636437393D646362613739010D6162636438303D646362613830010D6162636438313D646362613831010D6162636438323D646362613832010D6162636438333D646362613833010D6162636438343D646362613834010D6162636438353D646362613835010D6162636438363D646362613836010D6162636438373D646362613837010D6162636438383D646362613838010D6162636438393D646362613839010D6162636439303D646362613930010D6162636439313D646362613931010D6162636439323D646362613932010D6162636439333D646362613933010D6162636439343D646362613934010D6162636439353D646362613935010D6162636439363D646362613936010D6162636439373D646362613937010D6162636439383D646362613938010D6162636439393D646362613939010F616263643130303D64636261313030010F616263643130313D64636261313031010F616263643130323D64636261313032010F616263643130333D64636261313033010F616263643130343D64636261313034010F616263643130353D64636261313035010F616263643130363D64636261313036010F616263643130373D64636261","proof":{"aunts":["A607D9BF5107E6C9FD19B6928D9CC7714B0730E4","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"313037010F616263643130383D64636261313038010F616263643130393D64636261313039010F616263643131303D64636261313130010F616263643131313D64636261313131010F616263643131323D64636261313132010F616263643131333D64636261313133010F616263643131343D64636261313134010F616263643131353D64636261313135010F616263643131363D64636261313136010F616263643131373D64636261313137010F616263643131383D64636261313138010F616263643131393D64636261313139010F616263643132303D64636261313230010F616263643132313D64636261313231010F616263643132323D64636261313232010F616263643132333D64636261313233010F616263643132343D64636261313234010F616263643132353D64636261313235010F616263643132363D64636261313236010F616263643132373D64636261313237010F616263643132383D64636261313238010F616263643132393D64636261313239010F616263643133303D64636261313330010F616263643133313D64636261313331010F616263643133323D64636261313332010F616263643133333D64636261313333010F616263643133343D64636261313334010F616263643133353D64636261313335010F616263643133363D64636261313336010F616263643133373D646362613133","proof":{"aunts":["0FD794B3506B9E92CDE3703F7189D42167E77095","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"37010F616263643133383D64636261313338010F616263643133393D64636261313339010F616263643134303D64636261313430010F616263643134313D64636261313431010F616263643134323D64636261313432010F616263643134333D64636261313433010F616263643134343D64636261313434010F616263643134353D64636261313435010F616263643134363D64636261313436010F616263643134373D64636261313437010F616263643134383D64636261313438010F616263643134393D64636261313439010F616263643135303D64636261313530010F616263643135313D64636261313531010F616263643135323D64636261313532010F616263643135333D64636261313533010F616263643135343D64636261313534010F616263643135353D646362613135350100000000","proof":{"aunts":["50CBDC078A660EAE3442BA355BE10EE0D04408D1","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"E815E0A63B7EEE7894DE2D72372A7C393434AC8ACCC46B60C628910F73351806D55A59994F08B454BFD71EDAA0CA95733CA47E37FFDAF9AAA2431A8160176E01"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"9AAC3F3A118EE039EB460E9E5308D490D671C7490309BD5D62B5F392205C7E420DFDAF90F08294FF36BE8A9AA5CC203C1F2088B42D2BB8EE40A45F2BB5C54D0A"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.648Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

+ 127
- 0
consensus/ticker.go

@ -0,0 +1,127 @@
package consensus
import (
"time"
. "github.com/tendermint/go-common"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start() (bool, error)
Stop() bool
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
BaseService
timer *time.Timer
tickChan chan timeoutInfo
tockChan chan timeoutInfo
}
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
tt.BaseService = *NewBaseService(log, "TimeoutTicker", tt)
return tt
}
func (t *timeoutTicker) OnStart() error {
t.BaseService.OnStart()
go t.timeoutRoutine()
return nil
}
func (t *timeoutTicker) OnStop() {
t.BaseService.OnStop()
t.stopTimer()
}
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
log.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
log.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
log.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// ignore tickers for old height/round/step
if newti.Height < ti.Height {
continue
} else if newti.Height == ti.Height {
if newti.Round < ti.Round {
continue
} else if newti.Round == ti.Round {
if ti.Step > 0 && newti.Step <= ti.Step {
continue
}
}
}
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
log.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// goroutine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
case <-t.Quit:
return
}
}
}
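To illustrate the intended use, here is a minimal in-package sketch (not part of this diff) of driving the new TimeoutTicker; `timeoutInfo`'s fields are assumed from their uses above:

```go
package consensus

import "time"

func exampleTimeoutTicker() {
	ticker := NewTimeoutTicker()
	ticker.Start()
	defer ticker.Stop()

	// Schedule a timeout; a later (height, round, step) tick supersedes
	// any earlier one, per the ordering checks in timeoutRoutine.
	ticker.ScheduleTimeout(timeoutInfo{
		Duration: 500 * time.Millisecond,
		Height:   1,
		Round:    0,
		Step:     RoundStepPropose,
	})

	// The fired timeout is delivered on Chan() with the same timeoutInfo.
	ti := <-ticker.Chan()
	_ = ti // dispatch on ti.Height/Round/Step in the receiveRoutine
}
```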

+ 59
- 92
consensus/wal.go

@ -1,10 +1,9 @@
package consensus
import (
"bufio"
"os"
"time"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
@ -13,15 +12,15 @@ import (
//--------------------------------------------------------
// types and functions for saving consensus messages
type ConsensusLogMessage struct {
Time time.Time `json:"time"`
Msg ConsensusLogMessageInterface `json:"msg"`
type TimedWALMessage struct {
Time time.Time `json:"time"`
Msg WALMessage `json:"msg"`
}
type ConsensusLogMessageInterface interface{}
type WALMessage interface{}
var _ = wire.RegisterInterface(
struct{ ConsensusLogMessageInterface }{},
struct{ WALMessage }{},
wire.ConcreteType{types.EventDataRoundState{}, 0x01},
wire.ConcreteType{msgInfo{}, 0x02},
wire.ConcreteType{timeoutInfo{}, 0x03},
@ -35,111 +34,79 @@ var _ = wire.RegisterInterface(
// TODO: currently the wal is overwritten during replay catchup
// give it a mode so it's either reading or appending - must read to end to start appending again
type WAL struct {
fp *os.File
exists bool // if the file already existed (restarted process)
done chan struct{}
BaseService
group *auto.Group
light bool // ignore block parts
}
func NewWAL(file string, light bool) (*WAL, error) {
var walExists bool
if _, err := os.Stat(file); err == nil {
walExists = true
}
fp, err := os.OpenFile(file, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
func NewWAL(walDir string, light bool) (*WAL, error) {
group, err := auto.OpenGroup(walDir + "/wal")
if err != nil {
return nil, err
}
return &WAL{
fp: fp,
exists: walExists,
done: make(chan struct{}),
light: light,
}, nil
wal := &WAL{
group: group,
light: light,
}
wal.BaseService = *NewBaseService(log, "WAL", wal)
_, err = wal.Start()
return wal, err
}
func (wal *WAL) Exists() bool {
if wal == nil {
log.Warn("consensus msg log is nil")
return false
func (wal *WAL) OnStart() error {
wal.BaseService.OnStart()
size, err := wal.group.Head.Size()
if err != nil {
return err
} else if size == 0 {
wal.writeHeight(1)
}
return wal.exists
_, err = wal.group.Start()
return err
}
func (wal *WAL) OnStop() {
wal.BaseService.OnStop()
wal.group.Stop()
}
// called in newStep and for each pass in receiveRoutine
func (wal *WAL) Save(clm ConsensusLogMessageInterface) {
if wal != nil {
if wal.light {
// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
if mi, ok := clm.(msgInfo); ok {
_ = mi
if mi.PeerKey != "" {
return
}
func (wal *WAL) Save(wmsg WALMessage) {
if wal == nil {
return
}
if wal.light {
// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
if mi, ok := wmsg.(msgInfo); ok {
if mi.PeerKey != "" {
return
}
}
var n int
var err error
wire.WriteJSON(ConsensusLogMessage{time.Now(), clm}, wal.fp, &n, &err)
wire.WriteTo([]byte("\n"), wal.fp, &n, &err) // one message per line
if err != nil {
PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, clm))
}
}
}
// Must not be called concurrently with a write.
func (wal *WAL) Close() {
if wal != nil {
wal.fp.Close()
// Write #HEIGHT: XYZ if new height
if edrs, ok := wmsg.(types.EventDataRoundState); ok {
if edrs.Step == RoundStepNewHeight.String() {
wal.writeHeight(edrs.Height)
}
}
wal.done <- struct{}{}
}
func (wal *WAL) Wait() {
<-wal.done
}
func (wal *WAL) SeekFromEnd(found func([]byte) bool) (nLines int, err error) {
var current int64
// start at the end
current, err = wal.fp.Seek(0, 2)
// Write the wal message
var wmsgBytes = wire.JSONBytes(TimedWALMessage{time.Now(), wmsg})
err := wal.group.WriteLine(string(wmsgBytes))
if err != nil {
return
PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, wmsg))
}
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}
// backup until we find the right line
// current is how far we are from the beginning
for {
current -= 1
if current < 0 {
wal.fp.Seek(0, 0) // back to beginning
return
}
// backup one and read a new byte
if _, err = wal.fp.Seek(current, 0); err != nil {
return
}
b := make([]byte, 1)
if _, err = wal.fp.Read(b); err != nil {
return
}
if b[0] == '\n' || len(b) == 0 {
nLines += 1
// read a full line
reader := bufio.NewReader(wal.fp)
lineBytes, _ := reader.ReadBytes('\n')
if len(lineBytes) == 0 {
continue
}
func (wal *WAL) writeHeight(height int) {
wal.group.WriteLine(Fmt("#HEIGHT: %v", height))
if found(lineBytes) {
wal.fp.Seek(0, 1) // (?)
wal.fp.Seek(current, 0)
return
}
}
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}
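A minimal sketch of the new directory-based WAL API (again in-package and not part of this diff; the #HEIGHT line and flushing behavior follow the code above):

```go
package consensus

func exampleWAL() error {
	// NewWAL now takes a directory; go-autofile manages the files under it,
	// and a "#HEIGHT: 1" line is written when the group is empty.
	wal, err := NewWAL("/tmp/cs.wal", false) // light=false: also log block parts
	if err != nil {
		return err
	}
	defer wal.Stop()

	// One JSON-encoded TimedWALMessage per line, flushed after each Save.
	wal.Save(timeoutInfo{Height: 1, Round: 0})
	return nil
}
```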

+ 0
- 78
consensus/wal_test.go

@ -1,78 +0,0 @@
package consensus
import (
"io/ioutil"
"os"
"path"
"strings"
"testing"
. "github.com/tendermint/go-common"
)
var testTxt = `{"time":"2016-01-16T04:42:00.390Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-01-16T04:42:00.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":1,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"4CC6845A128E723A299B470CCBB2A158612AA51321447F6492F3DA57D135C27FCF4124B3B19446A248252BDA45B152819C76AAA5FD35E1C07091885CE6955E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":2,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"1B9924E010F47E0817695DFE462C531196E5A12632434DE12180BBA3EFDAD6B3960FDB9357AFF085EB61729A7D4A6AD8408555D7569C87D9028F280192FD4E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.393Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepCommit"}]}
{"time":"2016-01-16T04:42:00.395Z","msg":[1,{"height":28220,"round":0,"step":"RoundStepNewHeight"}]}`
func TestSeek(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "seek_test_")
if err != nil {
panic(err)
}
stat, _ := f.Stat()
name := stat.Name()
_, err = f.WriteString(testTxt)
if err != nil {
panic(err)
}
f.Close()
wal, err := NewWAL(path.Join(os.TempDir(), name), config.GetBool("cswal_light"))
if err != nil {
panic(err)
}
keyWord := "Precommit"
n, err := wal.SeekFromEnd(func(b []byte) bool {
if strings.Contains(string(b), keyWord) {
return true
}
return false
})
if err != nil {
panic(err)
}
// confirm n
spl := strings.Split(testTxt, "\n")
var i int
var s string
for i, s = range spl {
if strings.Contains(s, keyWord) {
break
}
}
// n is lines from the end.
spl = spl[i:]
if n != len(spl) {
panic(Fmt("Wrong nLines. Got %d, expected %d", n, len(spl)))
}
b, err := ioutil.ReadAll(wal.fp)
if err != nil {
panic(err)
}
// first char is a \n
spl2 := strings.Split(strings.Trim(string(b), "\n"), "\n")
for i, s := range spl {
if s != spl2[i] {
panic(Fmt("Mismatch. Got %s, expected %s", spl2[i], s))
}
}
}

+ 51
- 35
glide.lock

@ -1,36 +1,38 @@
hash: d87a1fe0061d41c1e6ec78d405d54ae321e75f4bff22b38d19d3255bbd17f21e
updated: 2016-09-10T18:02:24.023038691-04:00
hash: dcaf3fb1290b0d7942c86f0644a7431ac313247936eab9515b1ade9ffe579848
updated: 2017-01-13T00:30:55.237750829-05:00
imports:
- name: github.com/btcsuite/btcd
version: 2ef82e7db35dc8c499fa9091d768dc99bbaff893
version: 153dca5c1e4b5d1ea1523592495e5bedfa503391
subpackages:
- btcec
- name: github.com/btcsuite/fastsha256
version: 637e656429416087660c84436a2a035d69d54e2e
- name: github.com/BurntSushi/toml
version: 99064174e013895bbd9b025c31100bd1d9b590ca
- name: github.com/ebuchman/fail-test
version: c1eddaa09da2b4017351245b0d43234955276798
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
- name: github.com/gogo/protobuf
version: a11c89fbb0ad4acfa8abc4a4d5f7e27c477169b1
version: f9114dace7bd920b32f943b3c73fafbcbab2bf31
subpackages:
- proto
- name: github.com/golang/protobuf
version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
version: 8ee79997227bf9b34611aee7946ae64735e6fd93
subpackages:
- proto
- name: github.com/golang/snappy
version: d9eb7a3d35ec988b8585d4a0068e462c27d28380
- name: github.com/gorilla/websocket
version: a69d25be2fe2923a97c2af6849b2f52426f68fc0
version: 17634340a83afe0cab595e40fbc63f6ffa1d8915
- name: github.com/jmhodges/levigo
version: c42d9e0ca023e2198120196f842701bb4c55d7b9
- name: github.com/mattn/go-colorable
version: ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8
version: d228849504861217f796da67fae4f6e347643f15
- name: github.com/mattn/go-isatty
version: 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8
version: 30a891c33c7cde7b02a981314b4228ec99380cca
- name: github.com/spf13/pflag
version: 6fd2ff4ff8dfcdf5556fbdc0ac0284408274b1a7
version: 25f8b5b07aece3207895bf19f7ab517eb3b22a40
- name: github.com/syndtr/goleveldb
version: 6ae1797c0b42b9323fc27ff7dcf568df88f2f33d
version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65
subpackages:
- leveldb
- leveldb/cache
@ -44,17 +46,26 @@ imports:
- leveldb/storage
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 699d45bc678865b004b90213bf88a950f420973b
subpackages:
- client
- example/counter
- example/dummy
- example/nil
- server
- types
- name: github.com/tendermint/ed25519
version: 1f52c6f8b8a5c7908aff4497c186af344b428925
subpackages:
- edwards25519
- extra25519
- name: github.com/tendermint/go-flowrate
version: a20c98e61957faa93b4014fbd902f20ab9317a6a
- name: github.com/tendermint/go-autofile
version: 0416e0aa9c68205aa44844096f9f151ada9d0405
- name: github.com/tendermint/go-clist
version: 3baa390bbaf7634251c42ad69a8682e7e3990552
- name: github.com/tendermint/go-common
version: 47e06734f6ee488cc2e61550a38642025e1d4227
version: e289af53b6bf6af28da129d9ef64389a4cf7987f
subpackages:
- test
- name: github.com/tendermint/go-config
@ -62,40 +73,35 @@ imports:
- name: github.com/tendermint/go-crypto
version: 4b11d62bdb324027ea01554e5767b71174680ba0
- name: github.com/tendermint/go-db
version: 31fdd21c7eaeed53e0ea7ca597fb1e960e2988a5
version: 72f6dacd22a686cdf7fcd60286503e3aceda77ba
- name: github.com/tendermint/go-events
version: 1652dc8b3f7780079aa98c3ce20a83ee90b9758b
version: fddee66d90305fccb6f6d84d16c34fa65ea5b7f6
- name: github.com/tendermint/go-flowrate
version: a20c98e61957faa93b4014fbd902f20ab9317a6a
subpackages:
- flowrate
- name: github.com/tendermint/go-logger
version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
- name: github.com/tendermint/go-merkle
version: 05042c6ab9cad51d12e4cecf717ae68e3b1409a8
version: 7a86b4486f2cd84ac885c5bbc609fdee2905f5d1
- name: github.com/tendermint/go-p2p
version: eab2baa363de01b052b88c559e803776cd2c7dd6
version: 3d98f675f30dc4796546b8b890f895926152fa8d
subpackages:
- upnp
- name: github.com/tendermint/go-rpc
version: 855255d73eecd25097288be70f3fb208a5817d80
version: fcea0cda21f64889be00a0f4b6d13266b1a76ee7
subpackages:
- client
- server
- types
- name: github.com/tendermint/go-wire
version: 3b0adbc86ed8425eaed98516165b6788d9f4de7a
version: 2f3b7aafe21c80b19b6ee3210ecb3e3d07c7a471
- name: github.com/tendermint/log15
version: 9545b249b3aacafa97f79e0838b02b274adc6f5f
version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
subpackages:
- term
- name: github.com/tendermint/tmsp
version: 5d3eb0328a615ba55b580ce871033e605aa8b97d
subpackages:
- client
- example/counter
- example/dummy
- example/nil
- server
- types
- name: golang.org/x/crypto
version: aa2481cbfe81d911eb62b642b7a6b5ec58bbea71
version: 7c6cc321c680f03b9ef0764448e780704f486b51
subpackages:
- curve25519
- nacl/box
@ -106,20 +112,28 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: cfe3c2a7525b50c3d707256e371c90938cfef98a
version: 60c41d1de8da134c05b7b40154a9a82bf5b7edb9
subpackages:
- context
- http2
- http2/hpack
- idna
- internal/timeseries
- lex/httplex
- trace
- name: golang.org/x/sys
version: 30de6d19a3bd89a5f38ae4028e23aaa5582648af
version: d75a52659825e75fff6158388dddc6a5b04f9ba5
subpackages:
- unix
- name: golang.org/x/text
version: 44f4f658a783b0cee41fe0a23b8fc91d9c120558
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/grpc
version: 28707e14b1d2b2f5da81474dea2790d71e526987
version: 50955793b0183f9de69bd78e2ec251cf20aab121
subpackages:
- codes
- credentials
@ -128,5 +142,7 @@ imports:
- metadata
- naming
- peer
- stats
- tap
- transport
testImports: []

+ 3
- 12
glide.yaml

@ -6,6 +6,8 @@ import:
- package: github.com/gorilla/websocket
- package: github.com/spf13/pflag
- package: github.com/tendermint/ed25519
- package: github.com/tendermint/go-flowrate
- package: github.com/tendermint/go-autofile
- package: github.com/tendermint/go-clist
- package: github.com/tendermint/go-common
- package: github.com/tendermint/go-config
@ -15,21 +17,10 @@ import:
- package: github.com/tendermint/go-logger
- package: github.com/tendermint/go-merkle
- package: github.com/tendermint/go-p2p
subpackages:
- upnp
- package: github.com/tendermint/go-rpc
subpackages:
- client
- server
- types
- package: github.com/tendermint/go-wire
- package: github.com/tendermint/log15
- package: github.com/tendermint/tmsp
subpackages:
- client
- example/dummy
- example/nil
- types
- package: github.com/tendermint/abci
- package: golang.org/x/crypto
subpackages:
- ripemd160


+ 27
- 21
mempool/mempool.go

@ -7,12 +7,13 @@ import (
"sync/atomic"
"time"
auto "github.com/tendermint/go-autofile"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
/*
@ -39,7 +40,7 @@ Garbage collection of old elements from mempool.txs is handled via
the DetachPrev() call, which makes old elements unreachable by the
peer broadcastTxRoutine(), so they are automatically garbage collected.
TODO: Better handle tmsp client errors. (make it automatically handle connection errors)
TODO: Better handle abci client errors. (make it automatically handle connection errors)
*/
@ -62,7 +63,7 @@ type Mempool struct {
cache *txCache
// A log of mempool txs
wal *AutoFile
wal *auto.AutoFile
}
func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
@ -84,10 +85,16 @@ func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
}
func (mem *Mempool) initWAL() {
walFileName := mem.config.GetString("mempool_wal")
if walFileName != "" {
af, err := OpenAutoFile(walFileName)
walDir := mem.config.GetString("mempool_wal_dir")
if walDir != "" {
err := EnsureDir(walDir, 0700)
if err != nil {
log.Error("Error ensuring Mempool wal dir", "error", err)
PanicSanity(err)
}
af, err := auto.OpenAutoFile(walDir + "/wal")
if err != nil {
log.Error("Error opening Mempool wal file", "error", err)
PanicSanity(err)
}
mem.wal = af
@ -132,17 +139,17 @@ func (mem *Mempool) TxsFrontWait() *clist.CElement {
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
// CACHE
if mem.cache.Exists(tx) {
if cb != nil {
cb(&tmsp.Response{
Value: &tmsp.Response_CheckTx{
&tmsp.ResponseCheckTx{
Code: tmsp.CodeType_BadNonce, // TODO or duplicate tx
cb(&abci.Response{
Value: &abci.Response_CheckTx{
&abci.ResponseCheckTx{
Code: abci.CodeType_BadNonce, // TODO or duplicate tx
Log: "Duplicate transaction (ignored)",
},
},
@ -173,8 +180,8 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
return nil
}
// TMSP callback function
func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
// ABCI callback function
func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
if mem.recheckCursor == nil {
mem.resCbNormal(req, res)
} else {
@ -182,10 +189,10 @@ func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
}
}
func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *tmsp.Response_CheckTx:
if r.CheckTx.Code == tmsp.CodeType_OK {
case *abci.Response_CheckTx:
if r.CheckTx.Code == abci.CodeType_OK {
mem.counter++
memTx := &mempoolTx{
counter: mem.counter,
@ -207,15 +214,15 @@ func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
}
}
func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *tmsp.Response_CheckTx:
case *abci.Response_CheckTx:
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) {
PanicSanity(Fmt("Unexpected tx response from proxy during recheck\n"+
"Expected %X, got %X", r.CheckTx.Data, memTx.tx))
}
if r.CheckTx.Code == tmsp.CodeType_OK {
if r.CheckTx.Code == abci.CodeType_OK {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
@ -275,8 +282,7 @@ func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
func (mem *Mempool) Update(height int, txs []types.Tx) {
// mem.proxyMtx.Lock()
// defer mem.proxyMtx.Unlock()
// TODO: check err ?
mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx
// First, create a lookup map of txns in new txs.
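For context, a hedged sketch of the renamed callback flow from a caller's side (not part of this diff):

```go
package mempool

import (
	abci "github.com/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

func exampleCheckTx(mem *Mempool, tx types.Tx) error {
	// The callback now takes *abci.Response instead of *tmsp.Response.
	return mem.CheckTx(tx, func(res *abci.Response) {
		if r, ok := res.Value.(*abci.Response_CheckTx); ok {
			if r.CheckTx.Code == abci.CodeType_OK {
				// accepted into the mempool; peer routines will gossip it
			}
		}
	})
}
```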


+ 13
- 13
mempool/mempool_test.go

@ -7,7 +7,7 @@ import (
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmsp/example/counter"
"github.com/tendermint/abci/example/counter"
)
func TestSerialReap(t *testing.T) {
@ -16,12 +16,12 @@ func TestSerialReap(t *testing.T) {
app := counter.NewCounterApplication(true)
app.SetOption("serial", "on")
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewTMSPClient()
appConnCon, _ := cc.NewTMSPClient()
appConnMem, _ := cc.NewABCIClient()
appConnCon, _ := cc.NewABCIClient()
mempool := NewMempool(config, appConnMem)
appendTxsRange := func(start, end int) {
// Append some txs.
deliverTxsRange := func(start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
// This will succeed
@ -61,11 +61,11 @@ func TestSerialReap(t *testing.T) {
}
commitRange := func(start, end int) {
// Append some txs.
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
res := appConnCon.AppendTxSync(txBytes)
res := appConnCon.DeliverTxSync(txBytes)
if !res.IsOK() {
t.Errorf("Error committing tx. Code:%v result:%X log:%v",
res.Code, res.Data, res.Log)
@ -79,8 +79,8 @@ func TestSerialReap(t *testing.T) {
//----------------------------------------
// Append some txs.
appendTxsRange(0, 100)
// Deliver some txs.
deliverTxsRange(0, 100)
// Reap the txs.
reapCheck(100)
@ -88,9 +88,9 @@ func TestSerialReap(t *testing.T) {
// Reap again. We should get the same amount
reapCheck(100)
// Append 0 to 999, we should reap 900 new txs
// Deliver 0 to 999, we should reap 900 new txs
// because 100 were already counted.
appendTxsRange(0, 1000)
deliverTxsRange(0, 1000)
// Reap the txs.
reapCheck(1000)
@ -105,8 +105,8 @@ func TestSerialReap(t *testing.T) {
// We should have 500 left.
reapCheck(500)
// Append 100 invalid txs and 100 valid txs
appendTxsRange(900, 1100)
// Deliver 100 invalid txs and 100 valid txs
deliverTxsRange(900, 1100)
// We should have 600 now.
reapCheck(600)


+ 2
- 2
mempool/reactor.go

@ -12,7 +12,7 @@ import (
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
const (
@ -85,7 +85,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
}
// Just an alias for CheckTx since broadcasting happens in peer routines
func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*tmsp.Response)) error {
func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error {
return memR.Mempool.CheckTx(tx, cb)
}


+ 29
- 39
node/node.go

@ -21,6 +21,7 @@ import (
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
@ -52,8 +53,6 @@ func NewNodeDefault(config cfg.Config) *Node {
func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
EnsureDir(config.GetString("db_dir"), 0700) // incase we use memdb, cswal still gets written here
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
@ -62,11 +61,10 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
// Get State
state := getState(config, stateDB)
state := sm.GetState(config, stateDB)
// Create the proxyApp, which houses three connections:
// query, consensus, and mempool
proxyApp := proxy.NewAppConns(config, clientCreator, state, blockStore)
// Create the proxyApp, which manages connections (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, clientCreator, sm.NewHandshaker(config, state, blockStore))
if _, err := proxyApp.Start(); err != nil {
Exit(Fmt("Error starting proxy app connections: %v", err))
}
@ -96,7 +94,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
}
// Make BlockchainReactor
bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
bcReactor := bc.NewBlockchainReactor(config, state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
// Make MempoolReactor
mempool := mempl.NewMempool(config, proxyApp.Mempool())
@ -104,10 +102,10 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
// Make ConsensusReactor
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusReactor := consensus.NewConsensusReactor(consensusState, blockStore, fastSync)
if privValidator != nil {
consensusReactor.SetPrivValidator(privValidator)
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)
// Make p2p network switch
sw := p2p.NewSwitch(config.GetConfig("p2p"))
@ -124,7 +122,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
sw.AddReactor("PEX", pexReactor)
}
// filter peers by addr or pubkey with a tmsp query.
// filter peers by addr or pubkey with an abci query.
// if the query return code is OK, add peer
// XXX: query format subject to change
if config.GetBool("filter_peers") {
@ -230,6 +228,17 @@ func (n *Node) StartRPC() ([]net.Listener, error) {
}
listeners[i] = listener
}
// we expose a simplified api over grpc for convenience to app devs
grpcListenAddr := n.config.GetString("grpc_laddr")
if grpcListenAddr != "" {
listener, err := grpccore.StartGRPCServer(grpcListenAddr)
if err != nil {
return nil, err
}
listeners = append(listeners, listener)
}
return listeners, nil
}
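A hypothetical client for that gRPC listener (not part of this diff; the address and the generated stubs from rpc/grpc/types.proto are assumptions):

```go
package main

import "google.golang.org/grpc"

func main() {
	// grpc_laddr from the node config, e.g. tcp://0.0.0.0:36658 -> "localhost:36658".
	conn, err := grpc.Dial("localhost:36658", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Wrap conn with the BroadcastAPI client generated in rpc/grpc/types.pb.go
	// to call BroadcastTx over gRPC.
}
```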
@ -307,22 +316,11 @@ func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd255
return nodeInfo
}
// Load the most recent state from "state" db,
// or create a new one (and save) from genesis.
func getState(config cfg.Config, stateDB dbm.DB) *sm.State {
state := sm.LoadState(stateDB)
if state == nil {
state = sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state.Save()
}
return state
}
//------------------------------------------------------------------------------
// Users wishing to:
// * use an external signer for their validators
// * supply an in-proc tmsp app
// * supply an in-proc abci app
// should fork tendermint/tendermint and implement RunNode to
// call NewNode with their custom priv validator and/or custom
// proxy.ClientCreator interface
@ -402,17 +400,19 @@ func newConsensusState(config cfg.Config) *consensus.ConsensusState {
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
// Create two proxyAppConn connections,
// one for the consensus and one for the mempool.
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), state, blockStore)
// Create proxyAppConn connection (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), sm.NewHandshaker(config, state, blockStore))
_, err := proxyApp.Start()
if err != nil {
Exit(Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
if _, err := eventSwitch.Start(); err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
}
@ -423,12 +423,7 @@ func newConsensusState(config cfg.Config) *consensus.ConsensusState {
return consensusState
}
func RunReplayConsole(config cfg.Config) {
walFile := config.GetString("cswal")
if walFile == "" {
Exit("cswal file name not set in tendermint config")
}
func RunReplayConsole(config cfg.Config, walFile string) {
consensusState := newConsensusState(config)
if err := consensusState.ReplayConsole(walFile); err != nil {
@ -436,12 +431,7 @@ func RunReplayConsole(config cfg.Config) {
}
}
func RunReplay(config cfg.Config) {
walFile := config.GetString("cswal")
if walFile == "" {
Exit("cswal file name not set in tendermint config")
}
func RunReplay(config cfg.Config, walFile string) {
consensusState := newConsensusState(config)
if err := consensusState.ReplayMessages(walFile); err != nil {


+34 -30 proxy/app_conn.go

@ -1,32 +1,32 @@
package proxy
import (
tmspcli "github.com/tendermint/tmsp/client"
"github.com/tendermint/tmsp/types"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/types"
)
//----------------------------------------------------------------------------------------
// Enforce which tmsp msgs can be sent on a connection at the type level
// Enforce which abci msgs can be sent on a connection at the type level
type AppConnConsensus interface {
SetResponseCallback(tmspcli.Callback)
SetResponseCallback(abcicli.Callback)
Error() error
InitChainSync(validators []*types.Validator) (err error)
BeginBlockSync(height uint64) (err error)
AppendTxAsync(tx []byte) *tmspcli.ReqRes
EndBlockSync(height uint64) (changedValidators []*types.Validator, err error)
BeginBlockSync(hash []byte, header *types.Header) (err error)
DeliverTxAsync(tx []byte) *abcicli.ReqRes
EndBlockSync(height uint64) (types.ResponseEndBlock, error)
CommitSync() (res types.Result)
}
type AppConnMempool interface {
SetResponseCallback(tmspcli.Callback)
SetResponseCallback(abcicli.Callback)
Error() error
CheckTxAsync(tx []byte) *tmspcli.ReqRes
CheckTxAsync(tx []byte) *abcicli.ReqRes
FlushAsync() *tmspcli.ReqRes
FlushAsync() *abcicli.ReqRes
FlushSync() error
}
@ -34,42 +34,46 @@ type AppConnQuery interface {
Error() error
EchoSync(string) (res types.Result)
InfoSync() (res types.Result)
InfoSync() (types.ResponseInfo, error)
QuerySync(tx []byte) (res types.Result)
// SetOptionSync(key string, value string) (res types.Result)
}
//-----------------------------------------------------------------------------------------
// Implements AppConnConsensus (subset of tmspcli.Client)
// Implements AppConnConsensus (subset of abcicli.Client)
type appConnConsensus struct {
appConn tmspcli.Client
appConn abcicli.Client
}
func NewAppConnConsensus(appConn tmspcli.Client) *appConnConsensus {
func NewAppConnConsensus(appConn abcicli.Client) *appConnConsensus {
return &appConnConsensus{
appConn: appConn,
}
}
func (app *appConnConsensus) SetResponseCallback(cb tmspcli.Callback) {
func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) {
app.appConn.SetResponseCallback(cb)
}
func (app *appConnConsensus) Error() error {
return app.appConn.Error()
}
func (app *appConnConsensus) InitChainSync(validators []*types.Validator) (err error) {
return app.appConn.InitChainSync(validators)
}
func (app *appConnConsensus) BeginBlockSync(height uint64) (err error) {
return app.appConn.BeginBlockSync(height)
func (app *appConnConsensus) BeginBlockSync(hash []byte, header *types.Header) (err error) {
return app.appConn.BeginBlockSync(hash, header)
}
func (app *appConnConsensus) AppendTxAsync(tx []byte) *tmspcli.ReqRes {
return app.appConn.AppendTxAsync(tx)
func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes {
return app.appConn.DeliverTxAsync(tx)
}
func (app *appConnConsensus) EndBlockSync(height uint64) (changedValidators []*types.Validator, err error) {
func (app *appConnConsensus) EndBlockSync(height uint64) (types.ResponseEndBlock, error) {
return app.appConn.EndBlockSync(height)
}
@ -78,19 +82,19 @@ func (app *appConnConsensus) CommitSync() (res types.Result) {
}
//------------------------------------------------
// Implements AppConnMempool (subset of tmspcli.Client)
// Implements AppConnMempool (subset of abcicli.Client)
type appConnMempool struct {
appConn tmspcli.Client
appConn abcicli.Client
}
func NewAppConnMempool(appConn tmspcli.Client) *appConnMempool {
func NewAppConnMempool(appConn abcicli.Client) *appConnMempool {
return &appConnMempool{
appConn: appConn,
}
}
func (app *appConnMempool) SetResponseCallback(cb tmspcli.Callback) {
func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) {
app.appConn.SetResponseCallback(cb)
}
@ -98,7 +102,7 @@ func (app *appConnMempool) Error() error {
return app.appConn.Error()
}
func (app *appConnMempool) FlushAsync() *tmspcli.ReqRes {
func (app *appConnMempool) FlushAsync() *abcicli.ReqRes {
return app.appConn.FlushAsync()
}
@ -106,18 +110,18 @@ func (app *appConnMempool) FlushSync() error {
return app.appConn.FlushSync()
}
func (app *appConnMempool) CheckTxAsync(tx []byte) *tmspcli.ReqRes {
func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes {
return app.appConn.CheckTxAsync(tx)
}
//------------------------------------------------
// Implements AppConnQuery (subset of tmspcli.Client)
// Implements AppConnQuery (subset of abcicli.Client)
type appConnQuery struct {
appConn tmspcli.Client
appConn abcicli.Client
}
func NewAppConnQuery(appConn tmspcli.Client) *appConnQuery {
func NewAppConnQuery(appConn abcicli.Client) *appConnQuery {
return &appConnQuery{
appConn: appConn,
}
@ -131,7 +135,7 @@ func (app *appConnQuery) EchoSync(msg string) (res types.Result) {
return app.appConn.EchoSync(msg)
}
func (app *appConnQuery) InfoSync() (res types.Result) {
func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
}
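The value of splitting the client into these three narrow interfaces is compile-time enforcement: code holding an AppConnMempool simply cannot send consensus messages. A hedged illustration (checkAll is an invented name; the methods are the ones defined above):

import "github.com/tendermint/tendermint/proxy"

// checkAll only needs mempool access; calling DeliverTxAsync here, or
// passing the consensus connection by mistake, would not compile.
func checkAll(mem proxy.AppConnMempool, txs [][]byte) error {
	for _, tx := range txs {
		mem.CheckTxAsync(tx)
		if err := mem.Error(); err != nil {
			return err
		}
	}
	return mem.FlushSync()
}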


+17 -17 proxy/app_conn_test.go

@ -5,29 +5,29 @@ import (
"testing"
. "github.com/tendermint/go-common"
tmspcli "github.com/tendermint/tmsp/client"
"github.com/tendermint/tmsp/example/dummy"
"github.com/tendermint/tmsp/server"
"github.com/tendermint/tmsp/types"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/abci/server"
"github.com/tendermint/abci/types"
)
//----------------------------------------
type AppConnTest interface {
EchoAsync(string) *tmspcli.ReqRes
EchoAsync(string) *abcicli.ReqRes
FlushSync() error
InfoSync() (res types.Result)
InfoSync() (types.ResponseInfo, error)
}
type appConnTest struct {
appConn tmspcli.Client
appConn abcicli.Client
}
func NewAppConnTest(appConn tmspcli.Client) AppConnTest {
func NewAppConnTest(appConn abcicli.Client) AppConnTest {
return &appConnTest{appConn}
}
func (app *appConnTest) EchoAsync(msg string) *tmspcli.ReqRes {
func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes {
return app.appConn.EchoAsync(msg)
}
@ -35,7 +35,7 @@ func (app *appConnTest) FlushSync() error {
return app.appConn.FlushSync()
}
func (app *appConnTest) InfoSync() types.Result {
func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
}
@ -54,7 +54,7 @@ func TestEcho(t *testing.T) {
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewTMSPClient()
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
@ -78,7 +78,7 @@ func BenchmarkEcho(b *testing.B) {
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewTMSPClient()
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
@ -107,18 +107,18 @@ func TestInfo(t *testing.T) {
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewTMSPClient()
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
proxy := NewAppConnTest(cli)
t.Log("Connected")
res := proxy.InfoSync()
if res.IsErr() {
resInfo, err := proxy.InfoSync()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if string(res.Data) != "size:0" {
t.Error("Expected ResponseInfo with one element 'size:0' but got something else")
if string(resInfo.Data) != "{\"size\":0}" {
t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else")
}
}

+13 -11 proxy/client.go

@ -5,15 +5,15 @@ import (
"sync"
cfg "github.com/tendermint/go-config"
tmspcli "github.com/tendermint/tmsp/client"
"github.com/tendermint/tmsp/example/dummy"
nilapp "github.com/tendermint/tmsp/example/nil"
"github.com/tendermint/tmsp/types"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
nilapp "github.com/tendermint/abci/example/nil"
"github.com/tendermint/abci/types"
)
// NewTMSPClient returns newly connected client
// NewABCIClient returns newly connected client
type ClientCreator interface {
NewTMSPClient() (tmspcli.Client, error)
NewABCIClient() (abcicli.Client, error)
}
//----------------------------------------------------
@ -31,8 +31,8 @@ func NewLocalClientCreator(app types.Application) ClientCreator {
}
}
func (l *localClientCreator) NewTMSPClient() (tmspcli.Client, error) {
return tmspcli.NewLocalClient(l.mtx, l.app), nil
func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
return abcicli.NewLocalClient(l.mtx, l.app), nil
}
//---------------------------------------------------------------
@ -52,9 +52,9 @@ func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCrea
}
}
func (r *remoteClientCreator) NewTMSPClient() (tmspcli.Client, error) {
func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
// Run forever in a loop
remoteApp, err := tmspcli.NewClient(r.addr, r.transport, r.mustConnect)
remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect)
if err != nil {
return nil, fmt.Errorf("Failed to connect to proxy: %v", err)
}
@ -66,11 +66,13 @@ func (r *remoteClientCreator) NewTMSPClient() (tmspcli.Client, error) {
func DefaultClientCreator(config cfg.Config) ClientCreator {
addr := config.GetString("proxy_app")
transport := config.GetString("tmsp")
transport := config.GetString("abci")
switch addr {
case "dummy":
return NewLocalClientCreator(dummy.NewDummyApplication())
case "persistent_dummy":
return NewLocalClientCreator(dummy.NewPersistentDummyApplication(config.GetString("db_dir")))
case "nilapp":
return NewLocalClientCreator(nilapp.NewNilApplication())
default:
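
DefaultClientCreator resolves the proxy_app string to one of the creators above; the same interface also works directly for an in-process app. A sketch mirroring the pattern used in multiAppConn.OnStart below (the tx bytes are illustrative):

import (
	"github.com/tendermint/abci/example/dummy"
	"github.com/tendermint/tendermint/proxy"
)

func demo() {
	cc := proxy.NewLocalClientCreator(dummy.NewDummyApplication())
	cli, err := cc.NewABCIClient()
	if err != nil {
		panic(err)
	}
	// any of the three typed wrappers can sit on top of one client
	mem := proxy.NewAppConnMempool(cli)
	mem.CheckTxAsync([]byte("key=value"))
	if err := mem.FlushSync(); err != nil {
		panic(err)
	}
}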


+29 -21 proxy/multi_app_conn.go

@ -5,6 +5,8 @@ import (
cfg "github.com/tendermint/go-config"
)
//-----------------------------
// Tendermint's interface to the application consists of multiple connections
type AppConns interface {
Service
@ -14,19 +16,27 @@ type AppConns interface {
Query() AppConnQuery
}
func NewAppConns(config cfg.Config, clientCreator ClientCreator, state State, blockStore BlockStore) AppConns {
return NewMultiAppConn(config, clientCreator, state, blockStore)
func NewAppConns(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) AppConns {
return NewMultiAppConn(config, clientCreator, handshaker)
}
//-----------------------------
// multiAppConn implements AppConns
type Handshaker interface {
Handshake(AppConns) error
}
// a multiAppConn is made of a few appConns (mempool, consensus, query)
// and manages their underlying tmsp clients, ensuring they reboot together
// and manages their underlying abci clients, including the handshake
// which ensures the app and tendermint are synced.
// TODO: on app restart, clients must reboot together
type multiAppConn struct {
QuitService
BaseService
config cfg.Config
state State
blockStore BlockStore
handshaker Handshaker
mempoolConn *appConnMempool
consensusConn *appConnConsensus
@ -35,15 +45,14 @@ type multiAppConn struct {
clientCreator ClientCreator
}
// Make all necessary tmsp connections to the application
func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, state State, blockStore BlockStore) *multiAppConn {
// Make all necessary abci connections to the application
func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) *multiAppConn {
multiAppConn := &multiAppConn{
config: config,
state: state,
blockStore: blockStore,
handshaker: handshaker,
clientCreator: clientCreator,
}
multiAppConn.QuitService = *NewQuitService(log, "multiAppConn", multiAppConn)
multiAppConn.BaseService = *NewBaseService(log, "multiAppConn", multiAppConn)
return multiAppConn
}
@ -57,39 +66,38 @@ func (app *multiAppConn) Consensus() AppConnConsensus {
return app.consensusConn
}
// Returns the query Connection
func (app *multiAppConn) Query() AppConnQuery {
return app.queryConn
}
func (app *multiAppConn) OnStart() error {
app.QuitService.OnStart()
app.BaseService.OnStart()
// query connection
querycli, err := app.clientCreator.NewTMSPClient()
querycli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.queryConn = NewAppConnQuery(querycli)
// mempool connection
memcli, err := app.clientCreator.NewTMSPClient()
memcli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.mempoolConn = NewAppConnMempool(memcli)
// consensus connection
concli, err := app.clientCreator.NewTMSPClient()
concli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.consensusConn = NewAppConnConsensus(concli)
// TODO: handshake
// TODO: replay blocks
// TODO: (on restart) replay mempool
// ensure app is synced to the latest state
if app.handshaker != nil {
return app.handshaker.Handshake(app)
}
return nil
}
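Callers now inject the handshake instead of the old state/blockStore pair; passing nil skips it (as the tests below do when deliberately starting an unsynced app). The wiring used by node.go in this diff:

proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config),
	sm.NewHandshaker(config, state, blockStore))
if _, err := proxyApp.Start(); err != nil {
	// the handshake runs inside OnStart, so a failed replay surfaces here
}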

+0 -9 proxy/state.go

@ -1,9 +0,0 @@
package proxy
type State interface {
// TODO
}
type BlockStore interface {
// TODO
}

+25 -0 rpc/core/abci.go

@ -0,0 +1,25 @@
package core
import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
//-----------------------------------------------------------------------------
func ABCIQuery(query []byte) (*ctypes.ResultABCIQuery, error) {
res := proxyAppQuery.QuerySync(query)
return &ctypes.ResultABCIQuery{res}, nil
}
func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := proxyAppQuery.InfoSync()
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{
Data: res.Data,
Version: res.Version,
LastBlockHeight: res.LastBlockHeight,
LastBlockAppHash: res.LastBlockAppHash,
}, nil
}
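These handlers back the renamed abci_query and abci_info routes (see rpc/core/routes.go below). Client-side the call mirrors the updated tests; a trimmed sketch, where clientURI comes from the rpc test helpers and queryBytes stands in for the key being queried:

tmResult := new(ctypes.TMResult)
_, err := clientURI.Call("abci_query", map[string]interface{}{"query": queryBytes}, tmResult)
if err != nil {
	panic(err)
}
res := (*tmResult).(*ctypes.ResultABCIQuery) // res.Result carries the abci.Result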

+34 -36 rpc/core/mempool.go

@ -6,7 +6,7 @@ import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
//-----------------------------------------------------------------------------
@ -23,8 +23,8 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
// Returns with the response from CheckTx
func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
resCh := make(chan *tmsp.Response, 1)
err := mempool.CheckTx(tx, func(res *tmsp.Response) {
resCh := make(chan *abci.Response, 1)
err := mempool.CheckTx(tx, func(res *abci.Response) {
resCh <- res
})
if err != nil {
@ -39,61 +39,59 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
}, nil
}
// CONTRACT: returns error==nil iff the tx is included in a block.
//
// If CheckTx fails, return with the response from CheckTx AND an error.
// Else, block until the tx is included in a block,
// and return the result of AppendTx (with no error).
// Even if AppendTx fails, so long as the tx is included in a block this function
// will not return an error - it is the caller's responsibility to check res.Code.
// The function times out after five minutes and returns the result of CheckTx and an error.
// TODO: smarter timeout logic or someway to cancel (tx not getting committed is a sign of a larger problem!)
func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
// CONTRACT: only returns error if mempool.BroadcastTx errs (ie. problem with the app)
// or if we timeout waiting for tx to commit.
// If CheckTx or DeliverTx fail, no error will be returned, but the returned result
// will contain a non-OK ABCI code.
func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
// subscribe to tx being committed in block
appendTxResCh := make(chan types.EventDataTx, 1)
deliverTxResCh := make(chan types.EventDataTx, 1)
types.AddListenerForEvent(eventSwitch, "rpc", types.EventStringTx(tx), func(data types.TMEventData) {
appendTxResCh <- data.(types.EventDataTx)
deliverTxResCh <- data.(types.EventDataTx)
})
// broadcast the tx and register checktx callback
checkTxResCh := make(chan *tmsp.Response, 1)
err := mempool.CheckTx(tx, func(res *tmsp.Response) {
checkTxResCh := make(chan *abci.Response, 1)
err := mempool.CheckTx(tx, func(res *abci.Response) {
checkTxResCh <- res
})
if err != nil {
log.Error("err", "err", err)
return nil, fmt.Errorf("Error broadcasting transaction: %v", err)
}
checkTxRes := <-checkTxResCh
checkTxR := checkTxRes.GetCheckTx()
if r := checkTxR; r.Code != tmsp.CodeType_OK {
if checkTxR.Code != abci.CodeType_OK {
// CheckTx failed!
return &ctypes.ResultBroadcastTx{
Code: r.Code,
Data: r.Data,
Log: r.Log,
}, fmt.Errorf("Check tx failed with non-zero code: %s. Data: %X; Log: %s", r.Code.String(), r.Data, r.Log)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
DeliverTx: nil,
}, nil
}
// Wait for the tx to be included in a block,
// timeout after something reasonable.
timer := time.NewTimer(60 * 5 * time.Second)
// TODO: configurable?
timer := time.NewTimer(60 * 2 * time.Second)
select {
case appendTxRes := <-appendTxResCh:
case deliverTxRes := <-deliverTxResCh:
// The tx was included in a block.
// NOTE we don't return an error regardless of the AppendTx code;
// clients must check this to see if they need to send a new tx!
return &ctypes.ResultBroadcastTx{
Code: appendTxRes.Code,
Data: appendTxRes.Result,
Log: appendTxRes.Log,
deliverTxR := &abci.ResponseDeliverTx{
Code: deliverTxRes.Code,
Data: deliverTxRes.Data,
Log: deliverTxRes.Log,
}
log.Notice("DeliverTx passed ", "tx", []byte(tx), "response", deliverTxR)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
DeliverTx: deliverTxR,
}, nil
case <-timer.C:
r := checkTxR
return &ctypes.ResultBroadcastTx{
Code: r.Code,
Data: r.Data,
Log: r.Log,
log.Error("failed to include tx")
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
DeliverTx: nil,
}, fmt.Errorf("Timed out waiting for transaction to be included in a block")
}
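The practical consequence of the new contract: callers must inspect both ABCI codes themselves, since a nil error no longer implies the tx succeeded. A hedged sketch of the checks a caller needs (mirroring testBroadcastTxCommit below; names as in this diff):

res, err := core.BroadcastTxCommit(tx)
if err != nil {
	// mempool failure or timeout waiting for the block
}
if res.CheckTx.Code != abci.CodeType_OK {
	// rejected by CheckTx; DeliverTx will be nil
}
if res.DeliverTx != nil && res.DeliverTx.Code != abci.CodeType_OK {
	// included in a block but failed in DeliverTx
}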


+2 -2 rpc/core/pipe.go

@ -8,7 +8,7 @@ import (
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
//-----------------------------------------------------
@ -28,7 +28,7 @@ type Consensus interface {
type Mempool interface {
Size() int
CheckTx(types.Tx, func(*tmsp.Response)) error
CheckTx(types.Tx, func(*abci.Response)) error
Reap(int) []types.Tx
Flush()
}


+7 -7 rpc/core/routes.go

@ -28,9 +28,9 @@ var Routes = map[string]*rpc.RPCFunc{
"broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSyncResult, "tx"),
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsyncResult, "tx"),
// tmsp API
"tmsp_query": rpc.NewRPCFunc(TMSPQueryResult, "query"),
"tmsp_info": rpc.NewRPCFunc(TMSPInfoResult, ""),
// abci API
"abci_query": rpc.NewRPCFunc(ABCIQueryResult, "query"),
"abci_info": rpc.NewRPCFunc(ABCIInfoResult, ""),
// control API
"dial_seeds": rpc.NewRPCFunc(UnsafeDialSeedsResult, "seeds"),
@ -163,16 +163,16 @@ func BroadcastTxAsyncResult(tx []byte) (ctypes.TMResult, error) {
}
}
func TMSPQueryResult(query []byte) (ctypes.TMResult, error) {
if r, err := TMSPQuery(query); err != nil {
func ABCIQueryResult(query []byte) (ctypes.TMResult, error) {
if r, err := ABCIQuery(query); err != nil {
return nil, err
} else {
return r, nil
}
}
func TMSPInfoResult() (ctypes.TMResult, error) {
if r, err := TMSPInfo(); err != nil {
func ABCIInfoResult() (ctypes.TMResult, error) {
if r, err := ABCIInfo(); err != nil {
return nil, err
} else {
return r, nil


+0 -17 rpc/core/tmsp.go

@ -1,17 +0,0 @@
package core
import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
//-----------------------------------------------------------------------------
func TMSPQuery(query []byte) (*ctypes.ResultTMSPQuery, error) {
res := proxyAppQuery.QuerySync(query)
return &ctypes.ResultTMSPQuery{res}, nil
}
func TMSPInfo() (*ctypes.ResultTMSPInfo, error) {
res := proxyAppQuery.InfoSync()
return &ctypes.ResultTMSPInfo{res}, nil
}

+22 -12 rpc/core/types/responses.go

@ -6,7 +6,7 @@ import (
"github.com/tendermint/go-rpc/types"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
type ResultBlockchainInfo struct {
@ -58,22 +58,30 @@ type ResultDumpConsensusState struct {
}
type ResultBroadcastTx struct {
Code tmsp.CodeType `json:"code"`
Code abci.CodeType `json:"code"`
Data []byte `json:"data"`
Log string `json:"log"`
}
type ResultBroadcastTxCommit struct {
CheckTx *abci.ResponseCheckTx `json:"check_tx"`
DeliverTx *abci.ResponseDeliverTx `json:"deliver_tx"`
}
type ResultUnconfirmedTxs struct {
N int `json:"n_txs"`
Txs []types.Tx `json:"txs"`
}
type ResultTMSPInfo struct {
Result tmsp.Result `json:"result"`
type ResultABCIInfo struct {
Data string `json:"data"`
Version string `json:"version"`
LastBlockHeight uint64 `json:"last_block_height"`
LastBlockAppHash []byte `json:"last_block_app_hash"`
}
type ResultTMSPQuery struct {
Result tmsp.Result `json:"result"`
type ResultABCIQuery struct {
Result abci.Result `json:"result"`
}
type ResultUnsafeFlushMempool struct{}
@ -112,12 +120,13 @@ const (
ResultTypeDumpConsensusState = byte(0x41)
// 0x6 bytes are for txs / the application
ResultTypeBroadcastTx = byte(0x60)
ResultTypeUnconfirmedTxs = byte(0x61)
ResultTypeBroadcastTx = byte(0x60)
ResultTypeUnconfirmedTxs = byte(0x61)
ResultTypeBroadcastTxCommit = byte(0x62)
// 0x7 bytes are for querying the application
ResultTypeTMSPQuery = byte(0x70)
ResultTypeTMSPInfo = byte(0x71)
ResultTypeABCIQuery = byte(0x70)
ResultTypeABCIInfo = byte(0x71)
// 0x8 bytes are for events
ResultTypeSubscribe = byte(0x80)
@ -148,6 +157,7 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&ResultValidators{}, ResultTypeValidators},
wire.ConcreteType{&ResultDumpConsensusState{}, ResultTypeDumpConsensusState},
wire.ConcreteType{&ResultBroadcastTx{}, ResultTypeBroadcastTx},
wire.ConcreteType{&ResultBroadcastTxCommit{}, ResultTypeBroadcastTxCommit},
wire.ConcreteType{&ResultUnconfirmedTxs{}, ResultTypeUnconfirmedTxs},
wire.ConcreteType{&ResultSubscribe{}, ResultTypeSubscribe},
wire.ConcreteType{&ResultUnsubscribe{}, ResultTypeUnsubscribe},
@ -157,6 +167,6 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&ResultUnsafeProfile{}, ResultTypeUnsafeStopCPUProfiler},
wire.ConcreteType{&ResultUnsafeProfile{}, ResultTypeUnsafeWriteHeapProfile},
wire.ConcreteType{&ResultUnsafeFlushMempool{}, ResultTypeUnsafeFlushMempool},
wire.ConcreteType{&ResultTMSPQuery{}, ResultTypeTMSPQuery},
wire.ConcreteType{&ResultTMSPInfo{}, ResultTypeTMSPInfo},
wire.ConcreteType{&ResultABCIQuery{}, ResultTypeABCIQuery},
wire.ConcreteType{&ResultABCIInfo{}, ResultTypeABCIInfo},
)

+18 -0 rpc/grpc/api.go

@ -0,0 +1,18 @@
package core_grpc
import (
core "github.com/tendermint/tendermint/rpc/core"
context "golang.org/x/net/context"
)
type broadcastAPI struct {
}
func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) {
res, err := core.BroadcastTxCommit(req.Tx)
if err != nil {
return nil, err
}
return &ResponseBroadcastTx{res.CheckTx, res.DeliverTx}, nil
}

+44 -0 rpc/grpc/client_server.go

@ -0,0 +1,44 @@
package core_grpc
import (
"fmt"
"net"
"strings"
"time"
"google.golang.org/grpc"
. "github.com/tendermint/go-common"
)
// Start the grpcServer in a go routine
func StartGRPCServer(protoAddr string) (net.Listener, error) {
parts := strings.SplitN(protoAddr, "://", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?): %s", protoAddr)
}
proto, addr := parts[0], parts[1]
ln, err := net.Listen(proto, addr)
if err != nil {
return nil, err
}
grpcServer := grpc.NewServer()
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
go grpcServer.Serve(ln)
return ln, nil
}
// Start the client by dialing the server
func StartGRPCClient(protoAddr string) BroadcastAPIClient {
conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
panic(err)
}
return NewBroadcastAPIClient(conn)
}
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return Connect(addr)
}
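The two halves compose into a round trip; a sketch under an illustrative address, assuming a node's RPC plumbing (mempool, event switch) is already up, as in the tests below:

ln, err := core_grpc.StartGRPCServer("tcp://127.0.0.1:36659")
if err != nil {
	panic(err)
}
defer ln.Close()
cli := core_grpc.StartGRPCClient("tcp://127.0.0.1:36659")
res, err := cli.BroadcastTx(context.Background(),
	&core_grpc.RequestBroadcastTx{Tx: []byte("key=value")})
if err != nil {
	panic(err)
}
_ = res // res.CheckTx and res.DeliverTx carry the two ABCI results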

+3 -0 rpc/grpc/compile.sh

@ -0,0 +1,3 @@
#! /bin/bash
protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto

+174 -0 rpc/grpc/types.pb.go

@ -0,0 +1,174 @@
// Code generated by protoc-gen-go.
// source: types.proto
// DO NOT EDIT!
/*
Package core_grpc is a generated protocol buffer package.
It is generated from these files:
types.proto
It has these top-level messages:
RequestBroadcastTx
ResponseBroadcastTx
*/
package core_grpc
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import types "github.com/tendermint/abci/types"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type RequestBroadcastTx struct {
Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"`
}
func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} }
func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) }
func (*RequestBroadcastTx) ProtoMessage() {}
func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *RequestBroadcastTx) GetTx() []byte {
if m != nil {
return m.Tx
}
return nil
}
type ResponseBroadcastTx struct {
CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"`
DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"`
}
func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} }
func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) }
func (*ResponseBroadcastTx) ProtoMessage() {}
func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx {
if m != nil {
return m.CheckTx
}
return nil
}
func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx {
if m != nil {
return m.DeliverTx
}
return nil
}
func init() {
proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx")
proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for BroadcastAPI service
type BroadcastAPIClient interface {
BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error)
}
type broadcastAPIClient struct {
cc *grpc.ClientConn
}
func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient {
return &broadcastAPIClient{cc}
}
func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) {
out := new(ResponseBroadcastTx)
err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for BroadcastAPI service
type BroadcastAPIServer interface {
BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error)
}
func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) {
s.RegisterService(&_BroadcastAPI_serviceDesc, srv)
}
func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RequestBroadcastTx)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BroadcastAPIServer).BroadcastTx(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/core_grpc.BroadcastAPI/BroadcastTx",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx))
}
return interceptor(ctx, in, info, handler)
}
var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{
ServiceName: "core_grpc.BroadcastAPI",
HandlerType: (*BroadcastAPIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "BroadcastTx",
Handler: _BroadcastAPI_BroadcastTx_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "types.proto",
}
func init() { proto.RegisterFile("types.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 226 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48,
0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f,
0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f,
0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xc9, 0x2d, 0x2e, 0xd0, 0x07,
0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xa4, 0xc2, 0x25, 0x14, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xe2,
0x54, 0x94, 0x9f, 0x98, 0x92, 0x9c, 0x58, 0x5c, 0x12, 0x52, 0x21, 0xc4, 0xc7, 0xc5, 0x54, 0x52,
0x21, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x13, 0xc4, 0x54, 0x52, 0xa1, 0x54, 0xc7, 0x25, 0x1c, 0x94,
0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x8a, 0xac, 0xcc, 0x90, 0x8b, 0x23, 0x39, 0x23, 0x35, 0x39,
0x3b, 0x1e, 0xaa, 0x98, 0xdb, 0x48, 0x4c, 0x0f, 0x62, 0x38, 0x4c, 0xb5, 0x33, 0x48, 0x3a, 0xa4,
0x22, 0x88, 0x3d, 0x19, 0xc2, 0x10, 0x32, 0xe1, 0xe2, 0x4c, 0x2c, 0x28, 0x48, 0xcd, 0x4b, 0x01,
0xe9, 0x61, 0x02, 0xeb, 0x11, 0x47, 0xd3, 0xe3, 0x08, 0x96, 0x0f, 0xa9, 0x08, 0xe2, 0x48, 0x84,
0xb2, 0x8c, 0x62, 0xb8, 0x78, 0xe0, 0xf6, 0x3a, 0x06, 0x78, 0x0a, 0xf9, 0x70, 0x71, 0x23, 0xbb,
0x43, 0x56, 0x0f, 0xee, 0x7d, 0x3d, 0x4c, 0xdf, 0x48, 0xc9, 0xa1, 0x48, 0x63, 0x78, 0x23, 0x89,
0x0d, 0x1c, 0x14, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x73, 0x87, 0xb0, 0x52, 0x01,
0x00, 0x00,
}

+29 -0 rpc/grpc/types.proto

@ -0,0 +1,29 @@
syntax = "proto3";
package core_grpc;
import "github.com/tendermint/abci/types/types.proto";
//----------------------------------------
// Message types
//----------------------------------------
// Request types
message RequestBroadcastTx {
bytes tx = 1;
}
//----------------------------------------
// Response types
message ResponseBroadcastTx{
types.ResponseCheckTx check_tx = 1;
types.ResponseDeliverTx deliver_tx = 2;
}
//----------------------------------------
// Service Definition
service BroadcastAPI {
rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ;
}

+28 -17 rpc/test/client_test.go

@ -5,14 +5,15 @@ import (
crand "crypto/rand"
"fmt"
"math/rand"
"strings"
"testing"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
)
//--------------------------------------------------------------------------------
@ -91,7 +92,7 @@ func TestJSONBroadcastTxSync(t *testing.T) {
func testBroadcastTxSync(t *testing.T, resI interface{}, tx []byte) {
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTx)
if res.Code != tmsp.CodeType_OK {
if res.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxSync got non-zero exit code: %v. %X; %s", res.Code, res.Data, res.Log))
}
mem := node.MempoolReactor().Mempool
@ -129,36 +130,41 @@ func sendTx() ([]byte, []byte) {
return k, v
}
func TestURITMSPQuery(t *testing.T) {
func TestURIABCIQuery(t *testing.T) {
k, v := sendTx()
time.Sleep(time.Second)
tmResult := new(ctypes.TMResult)
_, err := clientURI.Call("tmsp_query", map[string]interface{}{"query": Fmt("%X", k)}, tmResult)
_, err := clientURI.Call("abci_query", map[string]interface{}{"query": k}, tmResult)
if err != nil {
panic(err)
}
testTMSPQuery(t, tmResult, v)
testABCIQuery(t, tmResult, v)
}
func TestJSONTMSPQuery(t *testing.T) {
func TestJSONABCIQuery(t *testing.T) {
k, v := sendTx()
tmResult := new(ctypes.TMResult)
_, err := clientJSON.Call("tmsp_query", []interface{}{Fmt("%X", k)}, tmResult)
_, err := clientJSON.Call("abci_query", []interface{}{k}, tmResult)
if err != nil {
panic(err)
}
testTMSPQuery(t, tmResult, v)
testABCIQuery(t, tmResult, v)
}
func testTMSPQuery(t *testing.T, statusI interface{}, value []byte) {
func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
tmRes := statusI.(*ctypes.TMResult)
query := (*tmRes).(*ctypes.ResultTMSPQuery)
query := (*tmRes).(*ctypes.ResultABCIQuery)
if query.Result.IsErr() {
panic(Fmt("Query returned an err: %v", query))
}
qResult := new(dummy.QueryResult)
if err := wire.ReadJSONBytes(query.Result.Data, qResult); err != nil {
t.Fatal(err)
}
// XXX: specific to value returned by the dummy
if !strings.Contains(string(query.Result.Data), "exists=true") {
panic(Fmt("Query error. Expected to find 'exists=true'. Got: %s", query.Result.Data))
if qResult.Exists != true {
panic(Fmt("Query error. Expected to find 'exists=true'. Got: %v", qResult))
}
}
@ -187,9 +193,14 @@ func TestJSONBroadcastTxCommit(t *testing.T) {
func testBroadcastTxCommit(t *testing.T, resI interface{}, tx []byte) {
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTx)
if res.Code != tmsp.CodeType_OK {
panic(Fmt("BroadcastTxCommit got non-zero exit code: %v. %X; %s", res.Code, res.Data, res.Log))
res := (*tmRes).(*ctypes.ResultBroadcastTxCommit)
checkTx := res.CheckTx
if checkTx.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxCommit got non-zero exit code from CheckTx: %v. %X; %s", checkTx.Code, checkTx.Data, checkTx.Log))
}
deliverTx := res.DeliverTx
if deliverTx.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxCommit got non-zero exit code from DeliverTx: %v. %X; %s", deliverTx.Code, deliverTx.Data, deliverTx.Log))
}
mem := node.MempoolReactor().Mempool
if mem.Size() != 0 {
@ -284,7 +295,7 @@ func TestWSTxEvent(t *testing.T) {
if bytes.Compare([]byte(evt.Tx), tx) != 0 {
t.Error("Event returned different tx")
}
if evt.Code != tmsp.CodeType_OK {
if evt.Code != abci.CodeType_OK {
t.Error("Event returned tx error code", evt.Code)
}
return nil


+24 -0 rpc/test/grpc_test.go

@ -0,0 +1,24 @@
package rpctest
import (
"testing"
"golang.org/x/net/context"
"github.com/tendermint/tendermint/rpc/grpc"
)
//-------------------------------------------
func TestBroadcastTx(t *testing.T) {
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
if err != nil {
t.Fatal(err)
}
if res.CheckTx.Code != 0 {
t.Fatalf("Non-zero check tx code: %d", res.CheckTx.Code)
}
if res.DeliverTx.Code != 0 {
t.Fatalf("Non-zero deliver tx code: %d", res.DeliverTx.Code)
}
}

+7 -0 rpc/test/helpers.go

@ -13,6 +13,7 @@ import (
"github.com/tendermint/tendermint/config/tendermint_test"
nm "github.com/tendermint/tendermint/node"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/rpc/grpc"
)
// global variables for use across all tests
@ -24,8 +25,10 @@ var (
requestAddr string
websocketAddr string
websocketEndpoint string
grpcAddr string
clientURI *client.ClientURI
clientJSON *client.ClientJSONRPC
clientGRPC core_grpc.BroadcastAPIClient
)
// initialize config and create new node
@ -33,12 +36,14 @@ func init() {
config = tendermint_test.ResetConfig("rpc_test_client_test")
chainID = config.GetString("chain_id")
rpcAddr = config.GetString("rpc_laddr")
grpcAddr = config.GetString("grpc_laddr")
requestAddr = rpcAddr
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
clientURI = client.NewClientURI(requestAddr)
clientJSON = client.NewClientJSONRPC(requestAddr)
clientGRPC = core_grpc.StartGRPCClient(grpcAddr)
// TODO: change consensus/state.go timeouts to be shorter
@ -59,6 +64,8 @@ func newNode(ready chan struct{}) {
// Run the RPC server.
node.StartRPC()
time.Sleep(time.Second)
ready <- struct{}{}
// Sleep forever


+2 -0 scripts/glide/parse.sh

@ -1,8 +1,10 @@
#! /bin/bash
set +u
if [[ "$GLIDE" == "" ]]; then
GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock
fi
set -u
set -euo pipefail


+2 -0 scripts/glide/update.sh

@ -7,9 +7,11 @@ IFS=$'\n\t'
LIB=$1
TMCORE=$GOPATH/src/github.com/tendermint/tendermint
set +u
if [[ "$GLIDE" == "" ]]; then
GLIDE=$TMCORE/glide.lock
fi
set -u
OLD_COMMIT=`bash $TMCORE/scripts/glide/parse.sh $LIB`


+13 -0 scripts/install_abci_apps.sh

@ -0,0 +1,13 @@
#! /bin/bash
go get github.com/tendermint/abci/...
# get the abci commit used by tendermint
COMMIT=`bash scripts/glide/parse.sh abci`
echo "Checking out vendored commit for abci: $COMMIT"
cd $GOPATH/src/github.com/tendermint/abci
git checkout $COMMIT
glide install
go install ./cmd/...

+0 -12 scripts/install_tmsp_apps.sh

@ -1,12 +0,0 @@
#! /bin/bash
go get github.com/tendermint/tmsp/...
# get the tmsp commit used by tendermint
COMMIT=`bash scripts/glide/parse.sh $(pwd)/glide.lock tmsp`
cd $GOPATH/src/github.com/tendermint/tmsp
git checkout $COMMIT
go install ./cmd/...

+19 -0 scripts/txs/random.sh

@ -0,0 +1,19 @@
#! /bin/bash
set -u
function toHex() {
echo -n $1 | hexdump -ve '1/1 "%.2X"'
}
N=$1
PORT=$2
for i in `seq 1 $N`; do
# store key value pair
KEY="abcd$i"
VALUE="dcba$i"
echo "$KEY:$VALUE"
curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=\"$(toHex $KEY=$VALUE)\"
done

+55 -0 state/errors.go

@ -0,0 +1,55 @@
package state
import (
. "github.com/tendermint/go-common"
)
type (
ErrInvalidBlock error
ErrProxyAppConn error
ErrUnknownBlock struct {
height int
}
ErrBlockHashMismatch struct {
coreHash []byte
appHash []byte
height int
}
ErrAppBlockHeightTooHigh struct {
coreHeight int
appHeight int
}
ErrLastStateMismatch struct {
height int
core []byte
app []byte
}
ErrStateMismatch struct {
got *State
expected *State
}
)
func (e ErrUnknownBlock) Error() string {
return Fmt("Could not find block #%d", e.height)
}
func (e ErrBlockHashMismatch) Error() string {
return Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.appHash, e.coreHash, e.height)
}
func (e ErrAppBlockHeightTooHigh) Error() string {
return Fmt("App block height (%d) is higher than core (%d)", e.appHeight, e.coreHeight)
}
func (e ErrLastStateMismatch) Error() string {
return Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.height, e.core, e.app)
}
func (e ErrStateMismatch) Error() string {
return Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.got, e.expected)
}

+332 -74 state/execution.go

@ -1,120 +1,205 @@
package state
import (
"bytes"
"errors"
"fmt"
"github.com/ebuchman/fail-test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
abci "github.com/tendermint/abci/types"
)
// Validate block
func (s *State) ValidateBlock(block *types.Block) error {
return s.validateBlock(block)
}
//--------------------------------------------------
// Execute the block
// Execute the block to mutate State.
// Validates block and then executes Data.Txs in the block.
func (s *State) ExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block, blockPartsHeader types.PartSetHeader) error {
// Validate the block.
err := s.validateBlock(block)
if err != nil {
return err
if err := s.validateBlock(block); err != nil {
return ErrInvalidBlock(err)
}
// Update the validator set
// compute bitarray of validators that signed
signed := commitBitArrayFromBlock(block)
_ = signed // TODO send on begin block
// copy the valset
valSet := s.Validators.Copy()
// Update valSet with signatures from block.
updateValidatorsWithBlock(s.LastValidators, valSet, block)
// TODO: Update the validator set (e.g. block.Data.ValidatorUpdates?)
nextValSet := valSet.Copy()
// Execute the block txs
err = s.execBlockOnProxyApp(eventCache, proxyAppConn, block)
changedValidators, err := execBlockOnProxyApp(eventCache, proxyAppConn, block)
if err != nil {
// There was some error in proxyApp
// TODO Report error and wait for proxyApp to be available.
return err
return ErrProxyAppConn(err)
}
// update the validator set
err = updateValidators(nextValSet, changedValidators)
if err != nil {
log.Warn("Error changing validator set", "error", err)
// TODO: err or carry on?
}
// All good!
// Update validator accums and set state variables
nextValSet.IncrementAccum(1)
s.LastBlockHeight = block.Height
s.LastBlockHash = block.Hash()
s.LastBlockParts = blockPartsHeader
s.LastBlockTime = block.Time
s.Validators = nextValSet
s.LastValidators = valSet
s.SetBlockAndValidators(block.Header, blockPartsHeader, valSet, nextValSet)
// save state with updated height/blockhash/validators
// but stale apphash, in case we fail between Commit and Save
s.SaveIntermediate()
fail.Fail() // XXX
return nil
}
// Executes block's transactions on proxyAppConn.
// Returns a list of updates to the validator set
// TODO: Generate a bitmap or otherwise store tx validity in state.
func (s *State) execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) error {
func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) ([]*abci.Validator, error) {
var validTxs, invalidTxs = 0, 0
// Execute transactions and get hash
proxyCb := func(req *tmsp.Request, res *tmsp.Response) {
proxyCb := func(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *tmsp.Response_AppendTx:
case *abci.Response_DeliverTx:
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
// reqAppendTx := req.(tmsp.RequestAppendTx)
// reqDeliverTx := req.(abci.RequestDeliverTx)
txError := ""
apTx := r.AppendTx
if apTx.Code == tmsp.CodeType_OK {
apTx := r.DeliverTx
if apTx.Code == abci.CodeType_OK {
validTxs += 1
} else {
log.Debug("Invalid tx", "code", r.AppendTx.Code, "log", r.AppendTx.Log)
log.Debug("Invalid tx", "code", r.DeliverTx.Code, "log", r.DeliverTx.Log)
invalidTxs += 1
txError = apTx.Code.String()
}
// NOTE: if we count we can access the tx from the block instead of
// pulling it from the req
event := types.EventDataTx{
Tx: req.GetAppendTx().Tx,
Result: apTx.Data,
Code: apTx.Code,
Log: apTx.Log,
Error: txError,
Tx: req.GetDeliverTx().Tx,
Data: apTx.Data,
Code: apTx.Code,
Log: apTx.Log,
Error: txError,
}
types.FireEventTx(eventCache, event)
}
}
proxyAppConn.SetResponseCallback(proxyCb)
// TODO: BeginBlock
// Begin block
err := proxyAppConn.BeginBlockSync(block.Hash(), types.TM2PB.Header(block.Header))
if err != nil {
log.Warn("Error in proxyAppConn.BeginBlock", "error", err)
return nil, err
}
fail.Fail() // XXX
// Run txs of block
for _, tx := range block.Txs {
proxyAppConn.AppendTxAsync(tx)
fail.FailRand(len(block.Txs)) // XXX
proxyAppConn.DeliverTxAsync(tx)
if err := proxyAppConn.Error(); err != nil {
return err
return nil, err
}
}
fail.Fail() // XXX
// End block
changedValidators, err := proxyAppConn.EndBlockSync(uint64(block.Height))
respEndBlock, err := proxyAppConn.EndBlockSync(uint64(block.Height))
if err != nil {
log.Warn("Error in proxyAppConn.EndBlock", "error", err)
return err
return nil, err
}
// TODO: Do something with changedValidators
log.Info("TODO: Do something with changedValidators", "changedValidators", changedValidators)
log.Info(Fmt("ExecBlock got %v valid txs and %v invalid txs", validTxs, invalidTxs))
fail.Fail() // XXX
log.Info("Executed block", "height", block.Height, "valid txs", validTxs, "invalid txs", invalidTxs)
if len(respEndBlock.Diffs) > 0 {
log.Info("Update to validator set", "updates", abci.ValidatorsString(respEndBlock.Diffs))
}
return respEndBlock.Diffs, nil
}
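Together with CommitSync (used in CommitStateUpdateMempool below and in the replay loop), the per-block sequence on the consensus connection is fixed: BeginBlock, DeliverTx for each tx, EndBlock, Commit. A schematic sketch with error paths trimmed (execSketch is an invented name; the types are the ones imported at the top of this file):

func execSketch(conn proxy.AppConnConsensus, block *types.Block) ([]byte, error) {
	if err := conn.BeginBlockSync(block.Hash(), types.TM2PB.Header(block.Header)); err != nil {
		return nil, err
	}
	for _, tx := range block.Txs {
		conn.DeliverTxAsync(tx) // results come back via the response callback
	}
	if _, err := conn.EndBlockSync(uint64(block.Height)); err != nil {
		return nil, err
	}
	res := conn.CommitSync()
	if res.IsErr() {
		return nil, res
	}
	return res.Data, nil // res.Data is the new app hash
}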
func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.Validator) error {
// TODO: prevent change of 1/3+ at once
for _, v := range changedValidators {
pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
if err != nil {
return err
}
address := pubkey.Address()
power := int64(v.Power)
// mind the overflow from uint64
if power < 0 {
return errors.New(Fmt("Power (%d) overflows int64", v.Power))
}
_, val := validators.GetByAddress(address)
if val == nil {
// add val
added := validators.Add(types.NewValidator(pubkey, power))
if !added {
return errors.New(Fmt("Failed to add new validator %X with voting power %d", address, power))
}
} else if v.Power == 0 {
// remove val
_, removed := validators.Remove(address)
if !removed {
return errors.New(Fmt("Failed to remove validator %X", address))
}
} else {
// update val
val.VotingPower = power
updated := validators.Update(val)
if !updated {
return errors.New(Fmt("Failed to update validator %X with voting power %d", address, power))
}
}
}
return nil
}
// return a bit array of validators that signed the last commit
// NOTE: assumes commits have already been authenticated
func commitBitArrayFromBlock(block *types.Block) *BitArray {
signed := NewBitArray(len(block.LastCommit.Precommits))
for i, precommit := range block.LastCommit.Precommits {
if precommit != nil {
signed.SetIndex(i, true) // val_.LastCommitHeight = block.Height - 1
}
}
return signed
}
//-----------------------------------------------------
// Validate block
func (s *State) ValidateBlock(block *types.Block) error {
return s.validateBlock(block)
}
func (s *State) validateBlock(block *types.Block) error {
// Basic block validation.
err := block.ValidateBasic(s.ChainID, s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime, s.AppHash)
err := block.ValidateBasic(s.ChainID, s.LastBlockHeight, s.LastBlockID, s.LastBlockTime, s.AppHash)
if err != nil {
return err
}
@ -126,11 +211,11 @@ func (s *State) validateBlock(block *types.Block) error {
}
} else {
if len(block.LastCommit.Precommits) != s.LastValidators.Size() {
return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
s.LastValidators.Size(), len(block.LastCommit.Precommits))
return errors.New(Fmt("Invalid block commit size. Expected %v, got %v",
s.LastValidators.Size(), len(block.LastCommit.Precommits)))
}
err := s.LastValidators.VerifyCommit(
s.ChainID, s.LastBlockHash, s.LastBlockParts, block.Height-1, block.LastCommit)
s.ChainID, s.LastBlockID, block.Height-1, block.LastCommit)
if err != nil {
return err
}
@ -139,41 +224,214 @@ func (s *State) validateBlock(block *types.Block) error {
return nil
}
// Updates the LastCommitHeight of the validators in valSet, in place.
// Assumes that lastValSet matches the valset of block.LastCommit
// CONTRACT: lastValSet is not mutated.
func updateValidatorsWithBlock(lastValSet *types.ValidatorSet, valSet *types.ValidatorSet, block *types.Block) {
//-----------------------------------------------------------------------------
// ApplyBlock executes the block, then commits and updates the mempool atomically
for i, precommit := range block.LastCommit.Precommits {
if precommit == nil {
continue
}
_, val := lastValSet.GetByIndex(i)
if val == nil {
PanicCrisis(Fmt("Failed to fetch validator at index %v", i))
}
if _, val_ := valSet.GetByAddress(val.Address); val_ != nil {
val_.LastCommitHeight = block.Height - 1
updated := valSet.Update(val_)
if !updated {
PanicCrisis("Failed to update validator LastCommitHeight")
}
} else {
// XXX This is not an error if validator was removed.
// But, we don't mutate validators yet so go ahead and panic.
PanicCrisis("Could not find validator")
}
// Execute and commit block against app, save block and state
func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus,
block *types.Block, partsHeader types.PartSetHeader, mempool Mempool) error {
// Run the block on the State:
// + update validator sets
// + run txs on the proxyAppConn
err := s.ExecBlock(eventCache, proxyAppConn, block, partsHeader)
if err != nil {
return errors.New(Fmt("Exec failed for application: %v", err))
}
// lock mempool, commit state, update mempool
err = s.CommitStateUpdateMempool(proxyAppConn, block, mempool)
if err != nil {
return errors.New(Fmt("Commit failed for application: %v", err))
}
return nil
}
//-----------------------------------------------------------------------------
// mempool must be locked during commit and update
// because state is typically reset on Commit and old txs must be replayed
// against committed state before new txs are run in the mempool, lest they be invalid
func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, block *types.Block, mempool Mempool) error {
mempool.Lock()
defer mempool.Unlock()
// Commit block, get hash back
res := proxyAppConn.CommitSync()
if res.IsErr() {
log.Warn("Error in proxyAppConn.CommitSync", "error", res)
return res
}
if res.Log != "" {
log.Debug("Commit.Log: " + res.Log)
}
// Set the state's new AppHash
s.AppHash = res.Data
type InvalidTxError struct {
Tx types.Tx
Code tmsp.CodeType
// Update mempool.
mempool.Update(block.Height, block.Txs)
return nil
}
// Updates to the mempool need to be synchronized with committing a block
// so apps can reset their transient state on Commit
type Mempool interface {
Lock()
Unlock()
Update(height int, txs []types.Tx)
}
func (txErr InvalidTxError) Error() string {
return Fmt("Invalid tx: [%v] code: [%v]", txErr.Tx, txErr.Code)
type MockMempool struct {
}
func (m MockMempool) Lock() {}
func (m MockMempool) Unlock() {}
func (m MockMempool) Update(height int, txs []types.Tx) {}
//----------------------------------------------------------------
// Handshake with app to sync to latest state of core by replaying blocks
// TODO: Should we move blockchain/store.go to its own package?
type BlockStore interface {
Height() int
LoadBlock(height int) *types.Block
LoadBlockMeta(height int) *types.BlockMeta
}
type Handshaker struct {
config cfg.Config
state *State
store BlockStore
nBlocks int // number of blocks applied to the state
}
func NewHandshaker(config cfg.Config, state *State, store BlockStore) *Handshaker {
return &Handshaker{config, state, store, 0}
}
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
if err != nil {
return errors.New(Fmt("Error calling Info: %v", err))
}
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
appHash := res.LastBlockAppHash
log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)
// TODO: check version
// replay blocks up to the latest in the blockstore
err = h.ReplayBlocks(appHash, blockHeight, proxyApp.Consensus())
if err != nil {
return errors.New(Fmt("Error on replay: %v", err))
}
// Save the state
h.state.Save()
// TODO: (on restart) replay mempool
return nil
}
// Replay all blocks after appBlockHeight and ensure the result matches the current state.
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnConsensus proxy.AppConnConsensus) error {
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
if storeBlockHeight == 0 {
return nil
} else if storeBlockHeight < appBlockHeight {
// if the app is ahead, there's nothing we can do
return ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
} else if storeBlockHeight == appBlockHeight {
// We ran Commit, but if we crashed before state.Save(),
// load the intermediate state and update the state.AppHash.
// NOTE: If ABCI allowed rollbacks, we could just replay the
// block even though it's been committed
stateAppHash := h.state.AppHash
lastBlockAppHash := h.store.LoadBlock(storeBlockHeight).AppHash
if bytes.Equal(stateAppHash, appHash) {
// we're all synced up
log.Debug("ABCI ReplayBlocks: Already synced")
} else if bytes.Equal(stateAppHash, lastBlockAppHash) {
// we crashed after commit and before saving state,
// so load the intermediate state and update the hash
h.state.LoadIntermediate()
h.state.AppHash = appHash
log.Debug("ABCI ReplayBlocks: Loaded intermediate state and updated state.AppHash")
} else {
PanicSanity(Fmt("Unexpected state.AppHash: state.AppHash %X; app.AppHash %X, lastBlock.AppHash %X", stateAppHash, appHash, lastBlockAppHash))
}
return nil
} else if storeBlockHeight == appBlockHeight+1 &&
storeBlockHeight == stateBlockHeight+1 {
// We crashed after saving the block
// but before Commit (both the state and app are behind),
// so just replay the block
// check that the lastBlock.AppHash matches the state apphash
block := h.store.LoadBlock(storeBlockHeight)
if !bytes.Equal(block.Header.AppHash, appHash) {
return ErrLastStateMismatch{storeBlockHeight, block.Header.AppHash, appHash}
}
blockMeta := h.store.LoadBlockMeta(storeBlockHeight)
h.nBlocks += 1
var eventCache types.Fireable // nil
// replay the latest block
return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, MockMempool{})
} else if storeBlockHeight != stateBlockHeight {
// unless we failed before committing or saving state (previous 2 cases),
// the store and state should be at the same height!
PanicSanity(Fmt("Expected storeHeight (%d) and stateHeight (%d) to match.", storeBlockHeight, stateBlockHeight))
} else {
// store is more than one ahead,
// so app wants to replay many blocks
// replay all blocks starting with appBlockHeight+1
var eventCache types.Fireable // nil
// TODO: use stateBlockHeight instead and let the consensus state
// do the replay
var appHash []byte
for i := appBlockHeight + 1; i <= storeBlockHeight; i++ {
h.nBlocks += 1
block := h.store.LoadBlock(i)
_, err := execBlockOnProxyApp(eventCache, appConnConsensus, block)
if err != nil {
log.Warn("Error executing block on proxy app", "height", i, "err", err)
return err
}
// Commit block, get hash back
res := appConnConsensus.CommitSync()
if res.IsErr() {
log.Warn("Error in proxyAppConn.CommitSync", "error", res)
return res
}
if res.Log != "" {
log.Info("Commit.Log: " + res.Log)
}
appHash = res.Data
}
if !bytes.Equal(h.state.AppHash, appHash) {
return errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash))
}
return nil
}
return nil
}
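Summarizing the branch structure above (store = blockstore height, app = last height the app committed, state = tendermint state height); each case corresponds to a distinct crash point between saving the block, Commit, and state.Save:

// store == 0                           -> fresh chain, nothing to replay
// store <  app                         -> app is ahead of core: ErrAppBlockHeightTooHigh
// store == app                         -> Commit ran; maybe load intermediate state and fix AppHash
// store == app+1 && store == state+1   -> block saved but Commit never ran: replay one block
// store == state && store >  app+1     -> app lags several blocks: exec + Commit each from app+1 up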

+210 -0 state/execution_test.go

@ -0,0 +1,210 @@
package state
import (
"bytes"
"fmt"
"path"
"testing"
"github.com/tendermint/tendermint/config/tendermint_test"
// . "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
)
var (
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("handshake_test"))
chainID = "handshake_chain"
nBlocks = 5
mempool = MockMempool{}
testPartSize = 65536
)
//---------------------------------------
// Test block execution
func TestExecBlock(t *testing.T) {
// TODO
}
//---------------------------------------
// Test handshake/replay
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
testHandshakeReplay(t, 0)
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
testHandshakeReplay(t, 1)
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
testHandshakeReplay(t, nBlocks-1)
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
testHandshakeReplay(t, nBlocks)
}
// Make some blocks. Start a fresh app and apply n blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, n int) {
config := tendermint_test.ResetConfig("proxy_test_")
state, store := stateAndStore(config)
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
clientCreator2 := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2")))
proxyApp := proxy.NewAppConns(config, clientCreator, NewHandshaker(config, state, store))
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
chain := makeBlockchain(t, proxyApp, state)
store.chain = chain // give the mock store the full chain to serve during replay
latestAppHash := state.AppHash
proxyApp.Stop()
if n > 0 {
// start a new app without handshake, play n blocks
proxyApp = proxy.NewAppConns(config, clientCreator2, nil)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
state2, _ := stateAndStore(config)
for i := 0; i < n; i++ {
block := chain[i]
err := state2.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(err)
}
}
proxyApp.Stop()
}
// now start it with the handshake
handshaker := NewHandshaker(config, state, store)
proxyApp = proxy.NewAppConns(config, clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync()
if err != nil {
t.Fatal(err)
}
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
}
if handshaker.nBlocks != nBlocks-n {
t.Fatalf("Expected handshake to sync %d blocks, got %d", nBlocks-n, handshaker.nBlocks)
}
}
//--------------------------
// utils for making blocks
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privKey.PubKey().Address(),
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privKey.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, proxyApp proxy.AppConns, state *State) (blockchain []*types.Block) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(prevBlockID)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
blockchain = append(blockchain, block)
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
}
return blockchain
}
// fresh state and mock store
func stateAndStore(config cfg.Config) (*State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
return MakeGenesisState(stateDB, &types.GenesisDoc{
ChainID: chainID,
Validators: []types.GenesisValidator{
types.GenesisValidator{privKey.PubKey(), 10000, "test"},
},
AppHash: nil,
}), NewMockBlockStore(config, nil)
}
//----------------------------------
// mock block store
type mockBlockStore struct {
config cfg.Config
chain []*types.Block
}
func NewMockBlockStore(config cfg.Config, chain []*types.Block) *mockBlockStore {
return &mockBlockStore{config, chain}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
Hash: block.Hash(),
Header: block.Header,
PartsHeader: block.MakePartSet(bs.config.GetInt("block_part_size")).Header(),
}
}

+ 95
- 15
state/state.go View File

@ -7,35 +7,47 @@ import (
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
var (
stateKey = []byte("stateKey")
stateKey = []byte("stateKey")
stateIntermediateKey = []byte("stateIntermediateKey")
)
//-----------------------------------------------------------------------------
// NOTE: not goroutine-safe.
type State struct {
mtx sync.Mutex
db dbm.DB
GenesisDoc *types.GenesisDoc
ChainID string
// mtx for writing to db
mtx sync.Mutex
db dbm.DB
// should not change
GenesisDoc *types.GenesisDoc
ChainID string
// updated at end of ExecBlock
LastBlockHeight int // Genesis state has this set to 0. So, Block(H=0) does not exist.
LastBlockHash []byte
LastBlockParts types.PartSetHeader
LastBlockID types.BlockID
LastBlockTime time.Time
Validators *types.ValidatorSet
LastValidators *types.ValidatorSet
AppHash []byte
LastValidators *types.ValidatorSet // block.LastCommit validated against this
// AppHash is updated after Commit
AppHash []byte
}
func LoadState(db dbm.DB) *State {
return loadState(db, stateKey)
}
func loadState(db dbm.DB, key []byte) *State {
s := &State{db: db}
buf := db.Get(stateKey)
buf := db.Get(key)
if len(buf) == 0 {
return nil
} else {
@ -56,8 +68,7 @@ func (s *State) Copy() *State {
GenesisDoc: s.GenesisDoc,
ChainID: s.ChainID,
LastBlockHeight: s.LastBlockHeight,
LastBlockHash: s.LastBlockHash,
LastBlockParts: s.LastBlockParts,
LastBlockID: s.LastBlockID,
LastBlockTime: s.LastBlockTime,
Validators: s.Validators.Copy(),
LastValidators: s.LastValidators.Copy(),
@ -68,13 +79,83 @@ func (s *State) Copy() *State {
func (s *State) Save() {
s.mtx.Lock()
defer s.mtx.Unlock()
s.db.SetSync(stateKey, s.Bytes())
}
func (s *State) SaveIntermediate() {
s.mtx.Lock()
defer s.mtx.Unlock()
s.db.SetSync(stateIntermediateKey, s.Bytes())
}
// Load the intermediate state into the current state
// and do some sanity checks
func (s *State) LoadIntermediate() {
s2 := loadState(s.db, stateIntermediateKey)
if s.ChainID != s2.ChainID {
PanicSanity(Fmt("State mismatch for ChainID. Got %v, Expected %v", s2.ChainID, s.ChainID))
}
if s.LastBlockHeight+1 != s2.LastBlockHeight {
PanicSanity(Fmt("State mismatch for LastBlockHeight. Got %v, Expected %v", s2.LastBlockHeight, s.LastBlockHeight+1))
}
if !bytes.Equal(s.Validators.Hash(), s2.LastValidators.Hash()) {
PanicSanity(Fmt("State mismatch for LastValidators. Got %X, Expected %X", s2.LastValidators.Hash(), s.Validators.Hash()))
}
if !bytes.Equal(s.AppHash, s2.AppHash) {
PanicSanity(Fmt("State mismatch for AppHash. Got %X, Expected %X", s2.AppHash, s.AppHash))
}
s.setBlockAndValidators(s2.LastBlockHeight, s2.LastBlockID, s2.LastBlockTime, s2.Validators.Copy(), s2.LastValidators.Copy())
}
func (s *State) Equals(s2 *State) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
func (s *State) Bytes() []byte {
buf, n, err := new(bytes.Buffer), new(int), new(error)
wire.WriteBinary(s, buf, n, err)
if *err != nil {
PanicCrisis(*err)
}
s.db.Set(stateKey, buf.Bytes())
return buf.Bytes()
}
// Mutate state variables to match block and validators
// after running EndBlock
func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, prevValSet, nextValSet *types.ValidatorSet) {
s.setBlockAndValidators(header.Height,
types.BlockID{header.Hash(), blockPartsHeader}, header.Time,
prevValSet, nextValSet)
}
func (s *State) setBlockAndValidators(
height int, blockID types.BlockID, blockTime time.Time,
prevValSet, nextValSet *types.ValidatorSet) {
s.LastBlockHeight = height
s.LastBlockID = blockID
s.LastBlockTime = blockTime
s.Validators = nextValSet
s.LastValidators = prevValSet
}
func (s *State) GetValidators() (*types.ValidatorSet, *types.ValidatorSet) {
return s.LastValidators, s.Validators
}
// Load the most recent state from "state" db,
// or create a new one (and save) from genesis.
func GetState(config cfg.Config, stateDB dbm.DB) *State {
state := LoadState(stateDB)
if state == nil {
state = MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state.Save()
}
return state
}
//-----------------------------------------------------------------------------
@ -117,8 +198,7 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State {
GenesisDoc: genDoc,
ChainID: genDoc.ChainID,
LastBlockHeight: 0,
LastBlockHash: nil,
LastBlockParts: types.PartSetHeader{},
LastBlockID: types.BlockID{},
LastBlockTime: genDoc.GenesisTime,
Validators: types.NewValidatorSet(validators),
LastValidators: types.NewValidatorSet(nil),
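
A toy sketch of the stateKey/stateIntermediateKey pattern introduced above, assuming a plain in-memory KV store and a cut-down State struct; it mirrors the continuity checks LoadIntermediate performs on restart.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// State here is an illustrative two-field stand-in, not the real one.
type State struct {
	ChainID         string
	LastBlockHeight int
}

var db = map[string][]byte{}

func save(key string, s State) {
	buf, err := json.Marshal(s)
	if err != nil {
		log.Fatal(err)
	}
	db[key] = buf
}

func load(key string) (State, bool) {
	buf, ok := db[key]
	if !ok {
		return State{}, false
	}
	var s State
	if err := json.Unmarshal(buf, &s); err != nil {
		log.Fatal(err)
	}
	return s, true
}

func main() {
	cur := State{ChainID: "handshake_chain", LastBlockHeight: 4}
	save("stateKey", cur)

	// mid-commit: persist height+1 under the intermediate key
	save("stateIntermediateKey", State{cur.ChainID, cur.LastBlockHeight + 1})

	// on restart: the intermediate state must continue directly from the
	// committed one, mirroring the sanity checks in LoadIntermediate
	s2, ok := load("stateIntermediateKey")
	if !ok || s2.ChainID != cur.ChainID || s2.LastBlockHeight != cur.LastBlockHeight+1 {
		log.Fatalf("intermediate state mismatch: %+v vs %+v", s2, cur)
	}
	fmt.Println("resuming commit at height", s2.LastBlockHeight)
}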


+ 42
- 0
state/state_test.go View File

@ -0,0 +1,42 @@
package state
import (
"testing"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/tendermint/config/tendermint_test"
)
func TestStateCopyEquals(t *testing.T) {
config := tendermint_test.ResetConfig("state_")
// Get State db
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := GetState(config, stateDB)
stateCopy := state.Copy()
if !state.Equals(stateCopy) {
t.Fatalf("expected state and its copy to be identical. got %v\nexpected %v\n", stateCopy, state)
}
stateCopy.LastBlockHeight += 1
if state.Equals(stateCopy) {
t.Fatalf("expected states to be different. got same %v", state)
}
}
func TestStateSaveLoad(t *testing.T) {
config := tendermint_test.ResetConfig("state_")
// Get State db
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := GetState(config, stateDB)
state.LastBlockHeight += 1
state.Save()
loadedState := LoadState(stateDB)
if !state.Equals(loadedState) {
t.Fatalf("expected saved and loaded states to be identical. got %v\nexpected %v\n", loadedState, state)
}
}

+ 1
- 0
test/app/clean.sh View File

@ -1,3 +1,4 @@
killall tendermint
killall dummy
killall counter
rm -rf ~/.tendermint_app

+ 73
- 24
test/app/counter_test.sh View File

@ -1,5 +1,11 @@
#! /bin/bash
# default to empty so `set -u` below doesn't trip on an unset variable
GRPC_BROADCAST_TX=${GRPC_BROADCAST_TX:-}
set -u
#####################
# counter over socket
#####################
@ -7,62 +13,105 @@ TESTNAME=$1
# Send some txs
function getCode() {
R=$1
if [[ "$R" == "{}" ]]; then
# protobuf automatically adds `omitempty` to every field, so an OK code and
# empty data/log will not show up at all when marshalled into JSON
# apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ...
echo 0
else
# this won't actually work if there's an error ...
echo "$R" | jq .code
fi
}
function sendTx() {
TX=$1
RESPONSE=`curl -s localhost:46657/broadcast_tx_commit?tx=\"$TX\"`
CODE=`echo $RESPONSE | jq .result[1].code`
ERROR=`echo $RESPONSE | jq .error`
ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
RESPONSE=`curl -s localhost:46657/broadcast_tx_commit?tx=0x$TX`
ERROR=`echo $RESPONSE | jq .error`
ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
RESPONSE=`echo $RESPONSE | jq .result[1]`
else
if [ -f grpc_client ]; then
rm grpc_client
fi
echo "... building grpc_client"
go build -o grpc_client grpc_client.go
RESPONSE=`./grpc_client $TX`
ERROR=""
fi
echo "RESPONSE"
echo $RESPONSE
echo $RESPONSE | jq . &> /dev/null
IS_JSON=$?
if [[ "$IS_JSON" != "0" ]]; then
ERROR="$RESPONSE"
fi
APPEND_TX_RESPONSE=`echo $RESPONSE | jq .deliver_tx`
APPEND_TX_CODE=`getCode "$APPEND_TX_RESPONSE"`
CHECK_TX_RESPONSE=`echo $RESPONSE | jq .check_tx`
CHECK_TX_CODE=`getCode "$CHECK_TX_RESPONSE"`
echo "-------"
echo "TX $TX"
echo "RESPONSE $RESPONSE"
echo "ERROR $ERROR"
echo "----"
if [[ "$ERROR" != "" ]]; then
echo "Unexpected error sending tx ($TX): $ERROR"
exit 1
fi
}
echo "... sending tx. expect no error"
# 0 should pass once and get in block, with no error
TX=00
sendTx $TX
if [[ $CODE != 0 ]]; then
if [[ $APPEND_TX_CODE != 0 ]]; then
echo "Got non-zero exit code for $TX. $RESPONSE"
exit 1
fi
if [[ "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
exit 1
fi
echo "... sending tx. expect error"
# second time should get rejected by the mempool (return error and non-zero code)
sendTx $TX
if [[ $CODE == 0 ]]; then
echo "CHECKTX CODE: $CHECK_TX_CODE"
if [[ "$CHECK_TX_CODE" == 0 ]]; then
echo "Got zero exit code for $TX. Expected tx to be rejected by mempool. $RESPONSE"
exit 1
fi
if [[ "$ERROR" == "" ]]; then
echo "Expected to get an error - tx $TX should have been rejected from mempool"
echo "$RESPONSE"
exit 1
fi
echo "... sending tx. expect no error"
# now, TX=01 should pass, with no error
TX=01
sendTx $TX
if [[ $CODE != 0 ]]; then
if [[ $APPEND_TX_CODE != 0 ]]; then
echo "Got non-zero exit code for $TX. $RESPONSE"
exit 1
fi
if [[ "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been accepted in block. $ERROR"
exit 1
fi
echo "... sending tx. expect no error, but invalid"
# now, TX=03 should get in a block (passes CheckTx, no error), but is invalid
TX=03
sendTx $TX
if [[ $CODE == 0 ]]; then
echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE"
if [[ "$CHECK_TX_CODE" != 0 ]]; then
echo "Got non-zero exit code for checktx on $TX. $RESPONSE"
exit 1
fi
if [[ "$ERROR" != "" ]]; then
echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
if [[ $APPEND_TX_CODE == 0 ]]; then
echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE"
exit 1
fi
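
A small Go illustration of the omitempty behavior the comment in getCode describes: a result whose code is 0 and whose data/log are empty marshals to an empty object, which is why "{}" is treated as code 0. Result is a hypothetical stand-in for the actual response type.

package main

import (
	"encoding/json"
	"fmt"
)

// Result mimics a protobuf-generated type with omitempty on every field.
type Result struct {
	Code int    `json:"code,omitempty"`
	Data []byte `json:"data,omitempty"`
	Log  string `json:"log,omitempty"`
}

func main() {
	ok, _ := json.Marshal(Result{})                           // code 0, empty data/log
	bad, _ := json.Marshal(Result{Code: 1, Log: "bad nonce"}) // non-zero code
	fmt.Println(string(ok))  // {}
	fmt.Println(string(bad)) // {"code":1,"log":"bad nonce"}
}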


+ 18
- 12
test/app/dummy_test.sh View File

@ -2,7 +2,8 @@
set -e
function toHex() {
echo -n $1 | hexdump -ve '1/1 "%.2X"'
echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}'
}
#####################
@ -13,20 +14,23 @@ TESTNAME=$1
# store key value pair
KEY="abcd"
VALUE="dcba"
curl -s 127.0.0.1:46657/broadcast_tx_commit?tx=\"$(toHex $KEY=$VALUE)\"
echo $(toHex $KEY=$VALUE)
curl -s 127.0.0.1:46657/broadcast_tx_commit?tx=$(toHex $KEY=$VALUE)
echo $?
echo ""
###########################
# test using the tmsp-cli
# test using the abci-cli
###########################
echo "... testing query with abci-cli"
# we should be able to look up the key
RESPONSE=`tmsp-cli query $KEY`
RESPONSE=`abci-cli query \"$KEY\"`
set +e
A=`echo $RESPONSE | grep exists=true`
A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? != 0 ]]; then
echo "Failed to find 'exists=true' for $KEY. Response:"
echo "$RESPONSE"
@ -35,9 +39,9 @@ fi
set -e
# we should not be able to look up the value
RESPONSE=`tmsp-cli query $VALUE`
RESPONSE=`abci-cli query \"$VALUE\"`
set +e
A=`echo $RESPONSE | grep exists=true`
A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? == 0 ]]; then
echo "Found 'exists=true' for $VALUE when we should not have. Response:"
echo "$RESPONSE"
@ -46,15 +50,17 @@ fi
set -e
#############################
# test using the /tmsp_query
# test using the /abci_query
#############################
echo "... testing query with /abci_query"
# we should be able to look up the key
RESPONSE=`curl -s 127.0.0.1:46657/tmsp_query?query=\"$(toHex $KEY)\"`
RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=$(toHex $KEY)`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`
set +e
A=`echo $RESPONSE | grep exists=true`
A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? != 0 ]]; then
echo "Failed to find 'exists=true' for $KEY. Response:"
echo "$RESPONSE"
@ -63,10 +69,10 @@ fi
set -e
# we should not be able to look up the value
RESPONSE=`curl -s 127.0.0.1:46657/tmsp_query?query=\"$(toHex $VALUE)\"`
RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=\"$(toHex $VALUE)\"`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`
set +e
A=`echo $RESPONSE | grep exists=true`
A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? == 0 ]]; then
echo "Found 'exists=true' for $VALUE when we should not have. Response:"
echo "$RESPONSE"

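For reference, a rough Go equivalent of the toHex helper used above; note that Go's encoder emits lowercase hex where the bash helper emits uppercase, and either case decodes to the same bytes.

package main

import (
	"encoding/hex"
	"fmt"
)

// toHex hex-encodes a key=value pair and prepends the 0x prefix
// that the tx query parameter now expects.
func toHex(s string) string {
	return "0x" + hex.EncodeToString([]byte(s))
}

func main() {
	fmt.Println(toHex("abcd=dcba")) // 0x616263643d64636261
}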

+ 36
- 0
test/app/grpc_client.go View File

@ -0,0 +1,36 @@
package main
import (
"encoding/hex"
"fmt"
"os"
"golang.org/x/net/context"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/rpc/grpc"
)
var grpcAddr = "tcp://localhost:36656"
func main() {
args := os.Args
if len(args) == 1 {
fmt.Println("Must enter a transaction to send (hex)")
os.Exit(1)
}
tx := args[1]
txBytes, err := hex.DecodeString(tx)
if err != nil {
fmt.Println("Invalid hex", err)
os.Exit(1)
}
clientGRPC := core_grpc.StartGRPCClient(grpcAddr)
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(string(wire.JSONBytes(res)))
}

+ 29
- 6
test/app/test.sh View File

@ -13,7 +13,7 @@ export TMROOT=$HOME/.tendermint_app
function dummy_over_socket(){
rm -rf $TMROOT
tendermint init
echo "Starting dummy and tendermint"
echo "Starting dummy_over_socket"
dummy > /dev/null &
pid_dummy=$!
tendermint node > tendermint.log &
@ -30,7 +30,7 @@ function dummy_over_socket(){
function dummy_over_socket_reorder(){
rm -rf $TMROOT
tendermint init
echo "Starting tendermint and dummy"
echo "Starting dummy_over_socket_reorder (i.e. start tendermint first)"
tendermint node > tendermint.log &
pid_tendermint=$!
sleep 2
@ -48,7 +48,7 @@ function dummy_over_socket_reorder(){
function counter_over_socket() {
rm -rf $TMROOT
tendermint init
echo "Starting counter and tendermint"
echo "Starting counter_over_socket"
counter --serial > /dev/null &
pid_counter=$!
tendermint node > tendermint.log &
@ -64,10 +64,10 @@ function counter_over_socket() {
function counter_over_grpc() {
rm -rf $TMROOT
tendermint init
echo "Starting counter and tendermint"
counter --serial --tmsp grpc > /dev/null &
echo "Starting counter_over_grpc"
counter --serial --abci grpc > /dev/null &
pid_counter=$!
tendermint node --tmsp grpc > tendermint.log &
tendermint node --abci grpc > tendermint.log &
pid_tendermint=$!
sleep 5
@ -77,6 +77,24 @@ function counter_over_grpc() {
kill -9 $pid_counter $pid_tendermint
}
function counter_over_grpc_grpc() {
rm -rf $TMROOT
tendermint init
echo "Starting counter_over_grpc_grpc (i.e. with grpc broadcast_tx)"
counter --serial --abci grpc > /dev/null &
pid_counter=$!
sleep 1
GRPC_PORT=36656
tendermint node --abci grpc --grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log &
pid_tendermint=$!
sleep 5
echo "running test"
GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx"
kill -9 $pid_counter $pid_tendermint
}
cd $GOPATH/src/github.com/tendermint/tendermint/test/app
case "$1" in
@ -92,6 +110,9 @@ case "$1" in
"counter_over_grpc")
counter_over_grpc
;;
"counter_over_grpc_grpc")
counter_over_grpc_grpc
;;
*)
echo "Running all"
dummy_over_socket
@ -101,5 +122,7 @@ case "$1" in
counter_over_socket
echo ""
counter_over_grpc
echo ""
counter_over_grpc_grpc
esac

+ 4
- 1
test/docker/Dockerfile View File

@ -19,7 +19,10 @@ RUN make get_vendor_deps
COPY . $REPO
RUN go install ./cmd/tendermint
RUN bash scripts/install_tmsp_apps.sh
RUN bash scripts/install_abci_apps.sh
# expose the volume for debugging
VOLUME $REPO
EXPOSE 46656
EXPOSE 46657

+ 26
- 0
test/net/setup.sh View File

@ -0,0 +1,26 @@
#! /bin/bash
set -eu
# grab glide for dependency mgmt
go get github.com/Masterminds/glide
# grab network monitor, install mintnet, netmon
# these might err
echo "... fetching repos. ignore go get errors"
set +e
go get github.com/tendermint/network_testing
go get github.com/tendermint/mintnet
go get github.com/tendermint/netmon
set -e
# install vendored deps
echo "GOPATH $GOPATH"
cd $GOPATH/src/github.com/tendermint/mintnet
echo "... install mintnet dir $(pwd)"
glide install
go install
cd $GOPATH/src/github.com/tendermint/netmon
echo "... install netmon dir $(pwd)"
glide install
go install

+ 34
- 0
test/net/start.sh View File

@ -0,0 +1,34 @@
#! /bin/bash
set -eu
# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo
DATACENTER=single
VALSETSIZE=4
BLOCKSIZE=8092
TX_SIZE=200
NTXS=$((BLOCKSIZE*4))
RESULTSDIR=results
CLOUD_PROVIDER=digitalocean
set +u
if [[ "$MACH_PREFIX" == "" ]]; then
MACH_PREFIX=mach
fi
set -u
export TMHEAD=`git rev-parse --abbrev-ref HEAD`
export TM_IMAGE="tendermint/tmbase"
cd $GOPATH/src/github.com/tendermint/network_testing
echo "... running network test $(pwd)"
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER
# TODO: publish result!
# cleanup
echo "... destroying machines"
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]

+ 4
- 54
test/net/test.sh View File

@ -1,58 +1,8 @@
#! /bin/bash
set -eu
# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo
DATACENTER=single
VALSETSIZE=4
BLOCKSIZE=8092
TX_SIZE=200
NTXS=$((BLOCKSIZE*4))
RESULTSDIR=results
CLOUD_PROVIDER=digitalocean
set +u
if [[ "$MACH_PREFIX" == "" ]]; then
MACH_PREFIX=mach
fi
set -u
export TMHEAD=`git rev-parse --abbrev-ref HEAD`
export TM_IMAGE="tendermint/tmbase"
# grab glide for dependency mgmt
go get github.com/Masterminds/glide
# grab network monitor, install mintnet, netmon
# these might err
echo "... fetching repos. ignore go get errors"
set +e
go get github.com/tendermint/network_testing
go get github.com/tendermint/mintnet
go get github.com/tendermint/netmon
set -e
# install vendored deps
echo "GOPATH $GOPATH"
cd $GOPATH/src/github.com/tendermint/mintnet
echo "... install mintnet dir $(pwd)"
glide install
go install
cd $GOPATH/src/github.com/tendermint/netmon
echo "... install netmon dir $(pwd)"
glide install
go install
cd $GOPATH/src/github.com/tendermint/network_testing
echo "... running network test $(pwd)"
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER
# TODO: publish result!
# cleanup
echo "... destroying machines"
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]
# install mintnet, netmon, fetch network_testing
bash test/net/setup.sh
# start the testnet
bash test/net/start.sh

+ 7
- 38
test/p2p/atomic_broadcast/test.sh View File

@ -1,59 +1,28 @@
#! /bin/bash
set -u
N=$1
###################################################################
# wait for all peers to come online
# assumes peers are already synced up
# test sending txs
# for each peer:
# wait to have 3 peers
# wait to be at height > 1
# send a tx, wait for commit
# assert app hash on every peer reflects the post tx state
###################################################################
N=4
# wait for everyone to come online
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
echo "... node $i is up"
done
echo ""
# run the test on each of them
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
# - assert everyone has 3 other peers
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
while [ "$N_PEERS" != 3 ]; do
echo "Waiting for node $i to connect to all peers ..."
sleep 1
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
done
# - assert block height is greater than 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
while [ "$BLOCK_HEIGHT" -le 1 ]; do
echo "Waiting for node $i to commit a block ..."
sleep 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
done
echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
# current state
HASH1=`curl -s $addr/status | jq .result[1].latest_app_hash`
# - send a tx
TX=\"aadeadbeefbeefbeef0$i\"
TX=aadeadbeefbeefbeef0$i
echo "Broadcast Tx $TX"
curl -s $addr/broadcast_tx_commit?tx=$TX
curl -s $addr/broadcast_tx_commit?tx=0x$TX
echo ""
# we need to wait another block to get the new app_hash


+ 53
- 0
test/p2p/basic/test.sh View File

@ -0,0 +1,53 @@
#! /bin/bash
set -u
N=$1
###################################################################
# wait for all peers to come online
# for each peer:
# wait to have N-1 peers
# wait to be at height > 1
###################################################################
# wait for everyone to come online
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
echo "... node $i is up"
done
echo ""
# wait for each of them to sync up
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
N_1=$(($N - 1))
# - assert everyone has N-1 other peers
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
while [ "$N_PEERS" != $N_1 ]; do
echo "Waiting for node $i to connect to all peers ..."
sleep 1
N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
done
# - assert block height is greater than 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
while [ "$BLOCK_HEIGHT" -le 1 ]; do
echo "Waiting for node $i to commit a block ..."
sleep 1
BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
done
echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
done
echo ""
echo "PASS"
echo ""
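
The polling pattern in this script, sketched in Go. The response shape (a two-element "result" array whose second element carries latest_block_height) is inferred from the jq paths above, not from an API spec.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// latestHeight polls the node's /status RPC endpoint and extracts the
// reported block height; the JSON layout is assumed from the jq paths.
func latestHeight(addr string) (int, error) {
	resp, err := http.Get("http://" + addr + "/status")
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var body struct {
		Result []json.RawMessage `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return 0, err
	}
	if len(body.Result) < 2 {
		return 0, fmt.Errorf("unexpected result shape")
	}
	var inner struct {
		LatestBlockHeight int `json:"latest_block_height"`
	}
	if err := json.Unmarshal(body.Result[1], &inner); err != nil {
		return 0, err
	}
	return inner.LatestBlockHeight, nil
}

func main() {
	addr := "127.0.0.1:46657"
	for {
		if h, err := latestHeight(addr); err == nil && h > 1 {
			fmt.Println("node is past height 1:", h)
			return
		}
		time.Sleep(time.Second) // node not up or not past height 1 yet
	}
}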

+ 4
- 3
test/p2p/client.sh View File

@ -6,13 +6,14 @@ NETWORK_NAME=$2
ID=$3
CMD=$4
NAME=test_container_$ID
echo "starting test client container with CMD=$CMD"
# run the test container on the local network
docker run -t \
docker run -t --rm \
-v $GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p \
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh "-1") \
--name test_container_$ID \
--name $NAME \
--entrypoint bash \
$DOCKER_IMAGE $CMD

+ 1
- 1
test/p2p/data/app/init.sh View File

@ -1,5 +1,5 @@
#! /bin/bash
# This is a sample bash script for a TMSP application
# This is a sample bash script for an ABCI application
cd app/
git clone https://github.com/tendermint/nomnomcoin.git


+ 1
- 1
test/p2p/data/core/init.sh View File

@ -8,7 +8,7 @@ BRANCH="master"
go get -d $TMREPO/cmd/tendermint
### DEPENDENCIES (example)
# cd $GOPATH/src/github.com/tendermint/tmsp
# cd $GOPATH/src/github.com/tendermint/abci
# git fetch origin $BRANCH
# git checkout $BRANCH
### DEPENDENCIES END


+ 43
- 0
test/p2p/fast_sync/check_peer.sh View File

@ -0,0 +1,43 @@
#! /bin/bash
set -eu
set -o pipefail
ID=$1
###########################################
#
# Wait for peer to catchup to other peers
#
###########################################
addr=$(test/p2p/ip.sh $ID):46657
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657
# get another peer's height
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`
# get another peer's state
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`
echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"
# wait for it to sync to past its previous height
set +e
set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
echo "... $h2"
done
# check the app hash
root2=`curl -s $addr/status | jq .result[1].latest_app_hash`
if [[ "$root1" != "$root2" ]]; then
echo "App hash after fast sync does not match. Got $root2; expected $root1"
exit 1
fi
echo "... fast sync successful"

+ 8
- 36
test/p2p/fast_sync/test.sh View File

@ -1,44 +1,16 @@
#! /bin/bash
set -eu
set -o pipefail
###############################################################
# for each peer:
# kill peer
# bring it back online via fast sync
# check app hash
###############################################################
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
PROXY_APP=$4
ID=$1
cd $GOPATH/src/github.com/tendermint/tendermint
addr=$(test/p2p/ip.sh $ID):46657
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657
# get another peer's height
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`
# get another peer's state
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`
echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"
# wait for it to sync to past its previous height
set +e
set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
echo "... $h2"
# run it on each of them
for i in `seq 1 $N`; do
bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP
done
# check the app hash
root2=`curl -s $addr/status | jq .result[1].latest_app_hash`
if [[ "$root1" != "$root2" ]]; then
echo "App hash after fast sync does not match. Got $root2; expected $root1"
exit 1
fi
echo "... fast sync successful"

+ 38
- 0
test/p2p/fast_sync/test_peer.sh View File

@ -0,0 +1,38 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
ID=$3
N=$4
PROXY_APP=$5
###############################################################
# this runs on each peer:
# kill peer
# bring it back online via fast sync
# wait for it to sync and check the app hash
###############################################################
echo "Testing fastsync on node $ID"
# kill peer
set +e # circle sigh :(
docker rm -vf local_testnet_$ID
set -e
# restart peer - should have an empty blockchain
SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP $SEEDS
# wait for peer to sync and check the app hash
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID"
echo ""
echo "PASS"
echo ""

+ 48
- 0
test/p2p/kill_all/check_peers.sh View File

@ -0,0 +1,48 @@
#! /bin/bash
set -eu
NUM_OF_PEERS=$1
# how many attempts for each peer to catch up by height
MAX_ATTEMPTS_TO_CATCH_UP=20
echo "Waiting for nodes to come online"
set +e
for i in $(seq 1 "$NUM_OF_PEERS"); do
addr=$(test/p2p/ip.sh "$i"):46657
curl -s "$addr/status" > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s "$addr/status" > /dev/null
ERR=$?
done
echo "... node $i is up"
done
set -e
# get the first peer's height
addr=$(test/p2p/ip.sh 1):46657
h1=$(curl -s "$addr/status" | jq .result[1].latest_block_height)
echo "1st peer is on height $h1"
echo "Waiting until the other peers report a height higher than the 1st one's"
for i in $(seq 2 "$NUM_OF_PEERS"); do
attempt=1
hi=0
while [[ $hi -le $h1 ]] ; do
addr=$(test/p2p/ip.sh "$i"):46657
hi=$(curl -s "$addr/status" | jq .result[1].latest_block_height)
echo "... peer $i is on height $hi"
((attempt++))
if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then
echo "$attempt unsuccessful attempts were made to catch up"
exit 1
fi
sleep 1
done
done

+ 32
- 0
test/p2p/kill_all/test.sh View File

@ -0,0 +1,32 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
NUM_OF_PEERS=$3
NUM_OF_CRASHES=$4
cd "$GOPATH/src/github.com/tendermint/tendermint"
###############################################################
# NUM_OF_CRASHES times:
# restart all peers
# wait for them to sync and check that they are making progress
###############################################################
for i in $(seq 1 "$NUM_OF_CRASHES"); do
echo ""
echo "Restarting all peers! Take $i ..."
# restart all peers
for j in $(seq 1 "$NUM_OF_PEERS"); do
docker stop "local_testnet_$j"
docker start "local_testnet_$j"
done
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" kill_all_$i "test/p2p/kill_all/check_peers.sh $NUM_OF_PEERS"
done
echo ""
echo "PASS"
echo ""

test/p2p/local_testnet.sh → test/p2p/local_testnet_start.sh View File


+ 12
- 0
test/p2p/local_testnet_stop.sh View File

@ -0,0 +1,12 @@
#! /bin/bash
set -u
NETWORK_NAME=$1
N=$2
for i in `seq 1 $N`; do
docker stop local_testnet_$i
docker rm -vf local_testnet_$i
done
docker network rm $NETWORK_NAME

+ 8
- 7
test/p2p/peer.sh View File

@ -4,9 +4,10 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
ID=$3
APP_PROXY=$4
set +u
SEEDS=$4
SEEDS=$5
set -u
if [[ "$SEEDS" != "" ]]; then
SEEDS=" --seeds $SEEDS "
@ -15,9 +16,9 @@ fi
echo "starting tendermint peer ID=$ID"
# start tendermint container on the network
docker run -d \
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh $ID) \
--name local_testnet_$ID \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core \
$DOCKER_IMAGE node $SEEDS --proxy_app=dummy
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh $ID) \
--name local_testnet_$ID \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core \
$DOCKER_IMAGE node $SEEDS --proxy_app=$APP_PROXY

+ 21
- 26
test/p2p/test.sh View File

@ -3,36 +3,31 @@ set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=local_testnet
N=4
PROXY_APP=persistent_dummy
cd $GOPATH/src/github.com/tendermint/tendermint
# stop the existing testnet and remove local network
set +e
bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N
set -e
# start the testnet on a local network
bash test/p2p/local_testnet.sh $DOCKER_IMAGE $NETWORK_NAME
# NOTE we re-use the same network for all tests
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
# test atomic broadcast
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab test/p2p/atomic_broadcast/test.sh
# test basic connectivity and consensus
# start client container and check the num peers and height for all nodes
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME basic "test/p2p/basic/test.sh $N"
# test fast sync (from current state of network)
# run it on each of them
N=4
for i in `seq 1 $N`; do
echo "Testing fasysync on node $i"
# kill peer
set +e # circle sigh :(
docker rm -vf local_testnet_$i
set -e
# restart peer - should have an empty blockchain
SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $SEEDS
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$i "test/p2p/fast_sync/test.sh $i"
done
echo ""
echo "PASS"
echo ""
# test atomic broadcast:
# start client container and test sending a tx to each node
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab "test/p2p/atomic_broadcast/test.sh $N"
# test fast sync (from current state of network):
# for each node, kill it and readd via fast sync
bash test/p2p/fast_sync/test.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
# test killing all peers
bash test/p2p/kill_all/test.sh $DOCKER_IMAGE $NETWORK_NAME $N 3

+ 5
- 0
test/persist/test.sh View File

@ -0,0 +1,5 @@
#! /bin/bash
cd $GOPATH/src/github.com/tendermint/tendermint
bash ./test/persist/test_failure_indices.sh

+ 104
- 0
test/persist/test_failure_indices.sh View File

@ -0,0 +1,104 @@
#! /bin/bash
export TMROOT=$HOME/.tendermint_persist
rm -rf $TMROOT
tendermint init
function start_procs(){
name=$1
indexToFail=$2
echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
PID_DUMMY=$!
if [[ "$indexToFail" == "" ]]; then
# run in background, don't fail
tendermint node --log_level=debug &> tendermint_${name}.log &
PID_TENDERMINT=$!
else
# run in foreground, fail
FAIL_TEST_INDEX=$indexToFail tendermint node --log_level=debug &> tendermint_${name}.log
PID_TENDERMINT=$!
fi
}
function kill_procs(){
kill -9 $PID_DUMMY $PID_TENDERMINT
wait $PID_DUMMY
wait $PID_TENDERMINT
}
# wait till node is up, send txs
function send_txs(){
addr="127.0.0.1:46657"
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
# send a bunch of txs over a few blocks
echo "Node is up, sending txs"
for i in `seq 1 5`; do
for j in `seq 1 100`; do
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'`
curl -s $addr/broadcast_tx_async?tx=0x$tx &> /dev/null
done
sleep 1
done
}
failsStart=0
fails=`grep -r "fail.Fail" --include \*.go . | wc -l`
failsEnd=$(($fails-1))
for failIndex in `seq $failsStart $failsEnd`; do
echo ""
echo "* Test FailIndex $failIndex"
# test failure at failIndex
send_txs &
start_procs 1 $failIndex
# tendermint should fail when it hits the fail index
kill -9 $PID_DUMMY
wait $PID_DUMMY
start_procs 2
# wait for node to handshake and make a new block
addr="localhost:46657"
curl -s $addr/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
i=$(($i + 1))
if [[ $i == 10 ]]; then
echo "Timed out waiting for tendermint to start"
exit 1
fi
done
# wait for a new block
h1=`curl -s $addr/status | jq .result[1].latest_block_height`
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
done
kill_procs
echo "* Passed Test for FailIndex $failIndex"
echo ""
done
echo "Passed Test: Persistence"
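
A sketch of the fail-point mechanism this harness exercises: a counter-based Fail() that kills the process when it reaches FAIL_TEST_INDEX. This is an illustrative stand-in for the fail.Fail calls grepped above, not the real go-common fail package.

package main

import (
	"fmt"
	"os"
	"strconv"
)

var (
	callIndex = 0  // increments at every fail point
	failIndex = -1 // -1 disables fail points
)

func init() {
	if v := os.Getenv("FAIL_TEST_INDEX"); v != "" {
		if i, err := strconv.Atoi(v); err == nil {
			failIndex = i
		}
	}
}

// Fail marks one fail point; the process dies when the running counter
// reaches the configured index, simulating a crash at that exact step.
func Fail() {
	if callIndex == failIndex {
		fmt.Fprintf(os.Stderr, "hit fail point %d, exiting\n", callIndex)
		os.Exit(1)
	}
	callIndex++
}

func main() {
	Fail() // e.g. before saving intermediate state
	fmt.Println("saved intermediate state")
	Fail() // e.g. before committing the block to the app
	fmt.Println("committed block")
}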

+ 70
- 0
test/persist/test_simple.sh View File

@ -0,0 +1,70 @@
#! /bin/bash
export TMROOT=$HOME/.tendermint_persist
rm -rf $TMROOT
tendermint init
function start_procs(){
name=$1
echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
PID_DUMMY=$!
tendermint node &> tendermint_${name}.log &
PID_TENDERMINT=$!
sleep 5
}
function kill_procs(){
kill -9 $PID_DUMMY $PID_TENDERMINT
}
function send_txs(){
# send a bunch of txs over a few blocks
echo "Sending txs"
for i in `seq 1 5`; do
for j in `seq 1 100`; do
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'`
curl -s 127.0.0.1:46657/broadcast_tx_async?tx=0x$tx &> /dev/null
done
sleep 1
done
}
start_procs 1
send_txs
kill_procs
start_procs 2
# wait for node to handshake and make a new block
addr="localhost:46657"
curl -s $addr/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
i=$(($i + 1))
if [[ $i == 10 ]]; then
echo "Timed out waiting for tendermint to start"
exit 1
fi
done
# wait for a new block
h1=`curl -s $addr/status | jq .result[1].latest_block_height`
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
done
kill_procs
sleep 2
echo "Passed Test: Persistence"

+ 3
- 0
test/run_test.sh View File

@ -11,6 +11,9 @@ bash test/test_cover.sh
# run the app tests
bash test/app/test.sh
# run the persistence test
bash test/persist/test.sh
if [[ "$BRANCH" == "master" || $(echo "$BRANCH" | grep "release-") != "" ]]; then
echo ""
echo "* branch $BRANCH; testing libs"


+ 6
- 2
test/test.sh View File

@ -10,6 +10,8 @@ bash ./test/docker/build.sh
echo ""
echo "* running go tests and app tests in docker container"
# sometimes it's helpful to mount the local test folder
# -v $GOPATH/src/github.com/tendermint/tendermint/test:/go/src/github.com/tendermint/tendermint/test
docker run --name run_test -t tester bash test/run_test.sh
# copy the coverage results out of docker container
@ -25,6 +27,8 @@ bash test/p2p/test.sh tester
BRANCH=`git rev-parse --abbrev-ref HEAD`
if [[ $(echo "$BRANCH" | grep "release-") != "" ]]; then
echo ""
echo "* branch $BRANCH; running mintnet/netmon throughput benchmark"
bash test/net/test.sh
echo "TODO: run network tests"
#echo "* branch $BRANCH; running mintnet/netmon throughput benchmark"
# TODO: replace mintnet
#bash test/net/test.sh
fi

+ 1
- 1
test/test_cover.sh View File

@ -5,7 +5,7 @@ PKGS=$(go list github.com/tendermint/tendermint/... | grep -v /vendor/)
set -e
echo "mode: atomic" > coverage.txt
for pkg in ${PKGS[@]}; do
go test -race -coverprofile=profile.out -covermode=atomic $pkg
go test -timeout 30m -race -coverprofile=profile.out -covermode=atomic $pkg
if [ -f profile.out ]; then
tail -n +2 profile.out >> coverage.txt;
rm profile.out


+ 4
- 1
test/test_libs.sh View File

@ -13,7 +13,7 @@ fi
####################
LIBS_GO_TEST=(go-clist go-common go-config go-crypto go-db go-events go-merkle go-p2p)
LIBS_MAKE_TEST=(go-rpc go-wire tmsp)
LIBS_MAKE_TEST=(go-rpc go-wire abci)
for lib in "${LIBS_GO_TEST[@]}"; do
@ -28,17 +28,20 @@ for lib in "${LIBS_GO_TEST[@]}"; do
fi
done
DIR=`pwd`
for lib in "${LIBS_MAKE_TEST[@]}"; do
# checkout vendored version of lib
bash scripts/glide/checkout.sh $GLIDE $lib
echo "Testing $lib ..."
cd $GOPATH/src/github.com/tendermint/$lib
make test
if [[ "$?" != 0 ]]; then
echo "FAIL"
exit 1
fi
cd $DIR
done
echo ""


+ 90
- 33
types/block.go View File

@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io"
"strings"
"time"
@ -20,9 +21,31 @@ type Block struct {
LastCommit *Commit `json:"last_commit"`
}
// TODO: version
func MakeBlock(height int, chainID string, txs []Tx, commit *Commit,
prevBlockID BlockID, valHash, appHash []byte, partSize int) (*Block, *PartSet) {
block := &Block{
Header: &Header{
ChainID: chainID,
Height: height,
Time: time.Now(),
NumTxs: len(txs),
LastBlockID: prevBlockID,
ValidatorsHash: valHash,
AppHash: appHash, // state merkle root of txs from the previous block.
},
LastCommit: commit,
Data: &Data{
Txs: txs,
},
}
block.FillHeader()
return block, block.MakePartSet(partSize)
}
// Basic validation that doesn't involve state data.
func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockHash []byte,
lastBlockParts PartSetHeader, lastBlockTime time.Time, appHash []byte) error {
func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockID BlockID,
lastBlockTime time.Time, appHash []byte) error {
if b.ChainID != chainID {
return errors.New(Fmt("Wrong Block.Header.ChainID. Expected %v, got %v", chainID, b.ChainID))
}
@ -39,11 +62,8 @@ func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockHash
if b.NumTxs != len(b.Data.Txs) {
return errors.New(Fmt("Wrong Block.Header.NumTxs. Expected %v, got %v", len(b.Data.Txs), b.NumTxs))
}
if !bytes.Equal(b.LastBlockHash, lastBlockHash) {
return errors.New(Fmt("Wrong Block.Header.LastBlockHash. Expected %X, got %X", lastBlockHash, b.LastBlockHash))
}
if !b.LastBlockParts.Equals(lastBlockParts) {
return errors.New(Fmt("Wrong Block.Header.LastBlockParts. Expected %v, got %v", lastBlockParts, b.LastBlockParts))
if !b.LastBlockID.Equals(lastBlockID) {
return errors.New(Fmt("Wrong Block.Header.LastBlockID. Expected %v, got %v", lastBlockID, b.LastBlockID))
}
if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) {
return errors.New(Fmt("Wrong Block.Header.LastCommitHash. Expected %X, got %X", b.LastCommitHash, b.LastCommit.Hash()))
@ -83,8 +103,8 @@ func (b *Block) Hash() []byte {
return b.Header.Hash()
}
func (b *Block) MakePartSet() *PartSet {
return NewPartSetFromData(wire.BinaryBytes(b))
func (b *Block) MakePartSet(partSize int) *PartSet {
return NewPartSetFromData(wire.BinaryBytes(b), partSize)
}
// Convenience.
@ -130,16 +150,15 @@ func (b *Block) StringShort() string {
//-----------------------------------------------------------------------------
type Header struct {
ChainID string `json:"chain_id"`
Height int `json:"height"`
Time time.Time `json:"time"`
NumTxs int `json:"num_txs"`
LastBlockHash []byte `json:"last_block_hash"`
LastBlockParts PartSetHeader `json:"last_block_parts"`
LastCommitHash []byte `json:"last_commit_hash"`
DataHash []byte `json:"data_hash"`
ValidatorsHash []byte `json:"validators_hash"`
AppHash []byte `json:"app_hash"` // state merkle root of txs from the previous block
ChainID string `json:"chain_id"`
Height int `json:"height"`
Time time.Time `json:"time"`
NumTxs int `json:"num_txs"` // XXX: Can we get rid of this?
LastBlockID BlockID `json:"last_block_id"`
LastCommitHash []byte `json:"last_commit_hash"` // commit from validators from the last block
DataHash []byte `json:"data_hash"` // transactions
ValidatorsHash []byte `json:"validators_hash"` // validators for the current block
AppHash []byte `json:"app_hash"` // state after txs from the previous block
}
// NOTE: hash is nil if required fields are missing.
@ -148,16 +167,15 @@ func (h *Header) Hash() []byte {
return nil
}
return merkle.SimpleHashFromMap(map[string]interface{}{
"ChainID": h.ChainID,
"Height": h.Height,
"Time": h.Time,
"NumTxs": h.NumTxs,
"LastBlock": h.LastBlockHash,
"LastBlockParts": h.LastBlockParts,
"LastCommit": h.LastCommitHash,
"Data": h.DataHash,
"Validators": h.ValidatorsHash,
"App": h.AppHash,
"ChainID": h.ChainID,
"Height": h.Height,
"Time": h.Time,
"NumTxs": h.NumTxs,
"LastBlockID": h.LastBlockID,
"LastCommit": h.LastCommitHash,
"Data": h.DataHash,
"Validators": h.ValidatorsHash,
"App": h.AppHash,
})
}
@ -170,8 +188,7 @@ func (h *Header) StringIndented(indent string) string {
%s Height: %v
%s Time: %v
%s NumTxs: %v
%s LastBlock: %X
%s LastBlockParts: %v
%s LastBlockID: %v
%s LastCommit: %X
%s Data: %X
%s Validators: %X
@ -181,8 +198,7 @@ func (h *Header) StringIndented(indent string) string {
indent, h.Height,
indent, h.Time,
indent, h.NumTxs,
indent, h.LastBlockHash,
indent, h.LastBlockParts,
indent, h.LastBlockID,
indent, h.LastCommitHash,
indent, h.DataHash,
indent, h.ValidatorsHash,
@ -197,6 +213,7 @@ type Commit struct {
// NOTE: The Precommits are in order of address to preserve the bonded ValidatorSet order.
// Any peer with a block can gossip precommits by index with a peer without recalculating the
// active ValidatorSet.
BlockID BlockID `json:"blockID"`
Precommits []*Vote `json:"precommits"`
// Volatile
@ -268,10 +285,15 @@ func (commit *Commit) IsCommit() bool {
}
func (commit *Commit) ValidateBasic() error {
if commit.BlockID.IsZero() {
return errors.New("Commit cannot be for nil block")
}
if len(commit.Precommits) == 0 {
return errors.New("No precommits in commit")
}
height, round := commit.Height(), commit.Round()
// validate the precommits
for _, precommit := range commit.Precommits {
// It's OK for precommits to be missing.
if precommit == nil {
@ -316,8 +338,10 @@ func (commit *Commit) StringIndented(indent string) string {
precommitStrings[i] = precommit.String()
}
return fmt.Sprintf(`Commit{
%s BlockID: %v
%s Precommits: %v
%s}#%X`,
indent, commit.BlockID,
indent, strings.Join(precommitStrings, "\n"+indent+" "),
indent, commit.hash)
}
@ -360,3 +384,36 @@ func (data *Data) StringIndented(indent string) string {
indent, strings.Join(txStrings, "\n"+indent+" "),
indent, data.hash)
}
//--------------------------------------------------------------------------------
type BlockID struct {
Hash []byte `json:"hash"`
PartsHeader PartSetHeader `json:"parts"`
}
func (blockID BlockID) IsZero() bool {
return len(blockID.Hash) == 0 && blockID.PartsHeader.IsZero()
}
func (blockID BlockID) Equals(other BlockID) bool {
return bytes.Equal(blockID.Hash, other.Hash) &&
blockID.PartsHeader.Equals(other.PartsHeader)
}
func (blockID BlockID) Key() string {
return string(blockID.Hash) + string(wire.BinaryBytes(blockID.PartsHeader))
}
func (blockID BlockID) WriteSignBytes(w io.Writer, n *int, err *error) {
if blockID.IsZero() {
wire.WriteTo([]byte("null"), w, n, err)
} else {
wire.WriteJSON(CanonicalBlockID(blockID), w, n, err)
}
}
func (blockID BlockID) String() string {
return fmt.Sprintf(`%X:%v`, blockID.Hash, blockID.PartsHeader)
}
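
A hedged sketch of what a map-style simple merkle hash (as used by Header.Hash via merkle.SimpleHashFromMap above) looks like: sort the field names, hash each key/value pair, then reduce pairwise to a single root. The hash function and byte encodings here are illustrative, not go-merkle's actual ones.

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// hashKV hashes one key/value pair (illustrative encoding).
func hashKV(k string, v []byte) []byte {
	h := sha256.Sum256(append([]byte(k), v...))
	return h[:]
}

// simpleHashFromMap sorts keys for determinism, hashes each pair,
// then reduces the hashes pairwise into a single root.
func simpleHashFromMap(m map[string][]byte) []byte {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order regardless of map iteration
	hashes := make([][]byte, len(keys))
	for i, k := range keys {
		hashes[i] = hashKV(k, m[k])
	}
	for len(hashes) > 1 {
		var next [][]byte
		for i := 0; i < len(hashes); i += 2 {
			if i+1 == len(hashes) {
				next = append(next, hashes[i]) // odd one out is carried up
			} else {
				h := sha256.Sum256(append(hashes[i], hashes[i+1]...))
				next = append(next, h[:])
			}
		}
		hashes = next
	}
	return hashes[0]
}

func main() {
	root := simpleHashFromMap(map[string][]byte{
		"ChainID": []byte("handshake_chain"),
		"Height":  {0x01},
	})
	fmt.Printf("%X\n", root)
}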

Some files were not shown because too many files changed in this diff
