
Merge pull request #426 from tendermint/release-0.9.0

Release 0.9.0
pull/428/head v0.9.0
Ethan Buchman committed 8 years ago (via GitHub)
commit d4f6254551
113 changed files with 4941 additions and 1659 deletions
  1. +15 -0  .editorconfig
  2. +41 -0  .github/ISSUE_TEMPLATE
  3. +2 -0  .gitignore
  4. +220 -0  CHANGELOG.md
  5. +16 -0  CONTRIBUTING.md
  6. +35 -44  DOCKER/Dockerfile
  7. +35 -0  DOCKER/Dockerfile.develop
  8. +15 -0  DOCKER/Makefile
  9. +46 -15  DOCKER/README.md
  10. +0 -10  DOCKER/run.sh
  11. +46 -31  Makefile
  12. +6 -2  README.md
  13. +13 -8  Vagrantfile
  14. +1 -1  blockchain/pool.go
  15. +28 -26  blockchain/reactor.go
  16. +6 -6  blockchain/store.go
  17. +14 -19  circle.yml
  18. +2 -10  cmd/tendermint/gen_validator.go
  19. +24 -13  cmd/tendermint/init.go
  20. +25 -12  cmd/tendermint/main.go
  21. +3 -3  cmd/tendermint/reset_priv_validator.go
  22. +59 -0  cmd/tendermint/run_node.go
  23. +1 -1  config/tendermint/config.go
  24. +1 -1  config/tendermint_test/config.go
  25. +1 -1  consensus/byzantine_test.go
  26. +4 -4  consensus/common_test.go
  27. +8 -17  consensus/mempool_test.go
  28. +16 -15  consensus/reactor.go
  29. +187 -200  consensus/replay.go
  30. +271 -0  consensus/replay_file.go
  31. +437 -27  consensus/replay_test.go
  32. +24 -47  consensus/state.go
  33. +3 -3  consensus/state_test.go
  34. +48 -3  consensus/test_data/build.sh
  35. +65 -0  consensus/test_data/many_blocks.cswal
  36. +2 -2  consensus/wal.go
  37. +16 -0  docs/architecture/ABCI.md
  38. +16 -0  docs/architecture/README.md
  39. +240 -0  docs/architecture/merkle-frey.md
  40. +17 -0  docs/architecture/merkle.md
  41. +42 -27  glide.lock
  42. +34 -10  glide.yaml
  43. +4 -4  mempool/mempool.go
  44. +2 -3  mempool/reactor.go
  45. +131 -175  node/node.go
  46. +4 -4  node/node_test.go
  47. +4 -4  proxy/app_conn.go
  48. +2 -3  proxy/client.go
  49. +4 -4  proxy/multi_app_conn.go
  50. +65 -0  rpc/client/event_test.go
  51. +88 -0  rpc/client/helpers.go
  52. +76 -0  rpc/client/helpers_test.go
  53. +349 -0  rpc/client/httpclient.go
  54. +82 -0  rpc/client/interface.go
  55. +105 -0  rpc/client/localclient.go
  56. +24 -0  rpc/client/main_test.go
  57. +194 -0  rpc/client/mock/abci.go
  58. +169 -0  rpc/client/mock/abci_test.go
  59. +128 -0  rpc/client/mock/client.go
  60. +55 -0  rpc/client/mock/status.go
  61. +45 -0  rpc/client/mock/status_test.go
  62. +179 -0  rpc/client/rpc_test.go
  63. +14 -10  rpc/core/abci.go
  64. +25 -0  rpc/core/blocks.go
  65. +3 -2  rpc/core/net.go
  66. +20 -28  rpc/core/pipe.go
  67. +12 -3  rpc/core/routes.go
  68. +1 -1  rpc/core/status.go
  69. +12 -7  rpc/core/types/responses.go
  70. +76 -135  rpc/test/client_test.go
  71. +6 -10  rpc/test/grpc_test.go
  72. +90 -73  rpc/test/helpers.go
  73. +36 -0  rpc/test/main_test.go
  74. +51 -0  scripts/dist.sh
  75. +54 -0  scripts/dist_build.sh
  76. +12 -0  scripts/tendermint-builder/Dockerfile
  77. +16 -16  state/errors.go
  78. +19 -166  state/execution.go
  79. +0 -210  state/execution_test.go
  80. +5 -32  state/state.go
  81. +6 -4  test/README.md
  82. +13 -13  test/app/dummy_test.sh
  83. +13 -5  test/docker/Dockerfile
  84. +3 -8  test/net/start.sh
  85. +54 -0  test/p2p/README.md
  86. +4 -4  test/p2p/client.sh
  87. +1 -1  test/p2p/fast_sync/test_peer.sh
  88. +1 -0  test/p2p/kill_all/check_peers.sh
  89. +12 -10  test/p2p/local_testnet_start.sh
  90. +4 -4  test/p2p/local_testnet_stop.sh
  91. +26 -11  test/p2p/peer.sh
  92. +17 -0  test/p2p/pex/check_peer.sh
  93. +31 -0  test/p2p/pex/dial_seeds.sh
  94. +15 -0  test/p2p/pex/test.sh
  95. +57 -0  test/p2p/pex/test_addrbook.sh
  96. +36 -0  test/p2p/pex/test_dial_seeds.sh
  97. +12 -0  test/p2p/seeds.sh
  98. +13 -8  test/p2p/test.sh
  99. +1 -1  test/persist/test.sh
  100. +59 -40  test/persist/test_failure_indices.sh

+15 -0  .editorconfig

@ -0,0 +1,15 @@
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[Makefile]
indent_style = tab
[*.sh]
indent_style = tab

+41 -0  .github/ISSUE_TEMPLATE

@ -0,0 +1,41 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for follow-up questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in case of a bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:

+2 -0  .gitignore

@ -12,3 +12,5 @@ remote_dump
vendor
.vagrant
test/p2p/data/
test/logs
.glide

+220 -0  CHANGELOG.md

@ -0,0 +1,220 @@
# Changelog
## 0.9.0 (March 6, 2017)
BREAKING CHANGES:
- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights:
```
message RequestQuery{
bytes data = 1;
string path = 2;
uint64 height = 3;
bool prove = 4;
}
message ResponseQuery{
CodeType code = 1;
int64 index = 2;
bytes key = 3;
bytes value = 4;
bytes proof = 5;
uint64 height = 6;
string log = 7;
}
```
- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`:
```
type BlockMeta struct {
BlockID BlockID `json:"block_id"` // the block hash and partsethash
Header *Header `json:"header"` // The block's Header
}
```
- `tendermint gen_validator` command output is now pure JSON
- `ValidatorSet` data type:
- expose a `Proposer` field. Note this means the `Proposer` is persisted with the `State`.
- change `.Proposer()` to `.GetProposer()`
FEATURES:
- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X`
- Client API for each endpoint, including mocks for testing
IMPROVEMENTS:
- `Node` is now a `BaseService`
- Simplified starting Tendermint in-process from another application
- Better organized Makefile
- Scripts for auto-building binaries across platforms
- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint
- New repo files: `CONTRIBUTING.md`, Github `ISSUE_TEMPLATE`, `CHANGELOG.md`
- Improvements on CircleCI for managing build/test artifacts
- Handshake replay is done through the consensus package, possibly using a mockApp
- Graceful shutdown of RPC listeners
- Tests for the PEX reactor and DialSeeds
BUG FIXES:
- Check peer.Send for failure before updating PeerState in consensus
- Fix panic in `/dial_seeds` with invalid addresses
- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable`
- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State`
## 0.8.0 (January 13, 2017)
BREAKING CHANGES:
- New data type `BlockID` to represent blocks:
```
type BlockID struct {
Hash []byte `json:"hash"`
PartsHeader PartSetHeader `json:"parts"`
}
```
- `Vote` data type now includes validator address and index:
```
type Vote struct {
ValidatorAddress []byte `json:"validator_address"`
ValidatorIndex int `json:"validator_index"`
Height int `json:"height"`
Round int `json:"round"`
Type byte `json:"type"`
BlockID BlockID `json:"block_id"` // zero if vote is nil.
Signature crypto.Signature `json:"signature"`
}
```
- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx
- Hex strings in the RPC are now "0x" prefixed
FEATURES:
- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23,
in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts:
```
type VoteSetMaj23Message struct {
Height int
Round int
Type byte
BlockID types.BlockID
}
```
- Configurable block part set size
- Validator set changes
- Optionally skip TimeoutCommit if we have all the votes
- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes
- GRPC server for BroadcastTx endpoint
IMPROVEMENTS:
- Less verbose logging
- Better test coverage (37% -> 49%)
- Canonical SignBytes for signable types
- Write-Ahead Log for Mempool and Consensus via go-autofile
- Better in-process testing for the consensus reactor and byzantine faults
- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points
- Better abstraction over timeout mechanics
BUG FIXES:
- Fix memory leak in mempool peer
- Fix panic on POLRound=-1
- Actually set the CommitTime
- Actually send BeginBlock message
- Fix a liveness issue caused by Byzantine proposals/votes. Uses the new `Maj23Msg`.
## 0.7.4 (December 14, 2016)
FEATURES:
- Enable the Peer Exchange reactor with the `--pex` flag for a more resilient gossip network (feature still in development, beware dragons)
IMPROVEMENTS:
- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration
## 0.7.3 (October 20, 2016)
IMPROVEMENTS:
- Type safe FireEvent
- More WAL/replay tests
- Cleanup some docs
BUG FIXES:
- Fix deadlock in mempool for synchronous apps
- Replay handles non-empty blocks
- Fix race condition in HeightVoteSet
## 0.7.2 (September 11, 2016)
BUG FIXES:
- Set mustConnect=false so tendermint will retry connecting to the app
## 0.7.1 (September 10, 2016)
FEATURES:
- New TMSP connection for Query/Info
- New RPC endpoints:
- `tmsp_query`
- `tmsp_info`
- Allow application to filter peers through Query (off by default)
IMPROVEMENTS:
- TMSP connection type enforced at compile time
- All listen/client urls use a "tcp://" or "unix://" prefix
BUG FIXES:
- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery
- Fix event unsubscribe
- Fix fastsync/blockchain reactor
## 0.7.0 (August 7, 2016)
BREAKING CHANGES:
- Strict SemVer starting now!
- Update to ABCI v0.2.0
- Validation types now called Commit
- NewBlock event only returns the block header
FEATURES:
- TMSP and RPC support TCP and UNIX sockets
- Additional config options, including block size and consensus parameters
- New WAL mode `cswal_light`; logs only the validator's own votes
- New RPC endpoints:
- for starting/stopping profilers, and for updating config
- `/broadcast_tx_commit`, returns when tx is included in a block, else an error
- `/unsafe_flush_mempool`, empties the mempool
IMPROVEMENTS:
- Various optimizations
- Remove bad or invalidated transactions from the mempool cache (allows later duplicates)
- More elaborate testing using CircleCI including benchmarking throughput on 4 digitalocean droplets
BUG FIXES:
- Various fixes to WAL and replay logic
- Various race conditions
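
The ABCI v0.4.0 breaking change listed under 0.9.0 above is easiest to read as code. The sketch below is illustrative only: it restates the proto messages from the changelog as local Go structs (the Go field names and the `Querier` interface are assumptions made for this sketch, not the abci package's actual API) and shows the call shape of the new height-aware, provable Query.
```
package main

import "fmt"

// RequestQuery and ResponseQuery mirror the proto messages in the 0.9.0
// changelog entry above; the Go field names here are an assumption.
type RequestQuery struct {
	Data   []byte
	Path   string
	Height uint64
	Prove  bool
}

type ResponseQuery struct {
	Code   uint32
	Index  int64
	Key    []byte
	Value  []byte
	Proof  []byte
	Height uint64
	Log    string
}

// Querier stands in for whatever ABCI query connection the application
// exposes; the real interface lives in the abci/proxy packages.
type Querier interface {
	Query(RequestQuery) ResponseQuery
}

// queryKey shows the new call shape: ask for a key at a specific height
// and request a Merkle proof alongside the value.
func queryKey(q Querier, key []byte, height uint64) (value, proof []byte, err error) {
	res := q.Query(RequestQuery{
		Data:   key,
		Path:   "/store", // app-defined path; hypothetical value
		Height: height,
		Prove:  true,
	})
	if res.Code != 0 {
		return nil, nil, fmt.Errorf("query failed: %s", res.Log)
	}
	return res.Value, res.Proof, nil
}

func main() {
	_ = queryKey // wiring up a concrete Querier is application-specific
}
```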

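The 0.9.0 features above also add a `/commit?height=X` RPC endpoint and a typed client package (rpc/client) with mocks. A minimal way to exercise the new endpoint is a plain HTTP GET against the node's RPC port. The sketch below assumes a local node with RPC on 46657 (the port exposed by the Dockerfile later in this diff) and prints the raw JSON-RPC response; the typed client under rpc/client wraps the same call, but its function names are not shown in this excerpt, so they are not used here.
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumes a node is running locally with the RPC server on its
	// default port; adjust the address for your setup.
	resp, err := http.Get("http://localhost:46657/commit?height=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The body is a JSON-RPC envelope whose result carries the block
	// header and the +2/3 commit for the requested height.
	fmt.Println(string(body))
}
```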
+16 -0  CONTRIBUTING.md

@ -0,0 +1,16 @@
# Contributing guidelines
**Thanks for considering making contributions to Tendermint!**
Please follow standard github best practices: fork the repo, **branch from the
tip of develop**, make some commits, test your code changes with `make test`,
and submit a pull request to develop.
See the [open issues](https://github.com/tendermint/tendermint/issues) for
things we need help with!
Please make sure to use `gofmt` before every commit - the easiest way to do
this is have your editor run it for you upon saving a file.
You can read the full guide [on our
site](https://tendermint.com/docs/guides/contributing).

+35 -44  DOCKER/Dockerfile

@ -1,54 +1,45 @@
# Pull base image.
FROM golang:1.6
FROM alpine:3.5
ENV USER tmuser
ENV DATA_ROOT /data/tendermint
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.8.0
# Set user right away for determinism
RUN groupadd -r $USER \
&& useradd -r -s /bin/false -g $USER $USER
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
# validator file into /tendermint.
#
# The /tendermint/data dir is used by tendermint to store state.
ENV DATA_ROOT /tendermint
ENV TMROOT $DATA_ROOT
# Create home directory for USER
# Needed for nodejs/nom
RUN mkdir -p /home/$USER \
&& chown -R $USER:$USER /home/$USER
# Set user right away for determinism
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
# Create directory for persistence and give our user ownership
RUN mkdir -p $DATA_ROOT \
&& chown -R $USER:$USER $DATA_ROOT
# Set the env variables to non-interactive
ENV DEBIAN_FRONTEND noninteractive
ENV DEBIAN_PRIORITY critical
ENV DEBCONF_NOWARNINGS yes
ENV TERM linux
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
# Grab deps (git)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git && \
rm -rf /var/lib/apt/lists/*
# Grab deps (node)
RUN curl -sL https://deb.nodesource.com/setup_5.x | bash -
RUN apt-get update && \
apt-get install -y --no-install-recommends \
nodejs && \
rm -rf /var/lib/apt/lists/*
# Copy run.sh
COPY ./run.sh $DATA_ROOT/run.sh
RUN chmod +x $DATA_ROOT/run.sh
# Persist data, set user
WORKDIR $DATA_ROOT
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
# jq and curl used for extracting `pub_key` from private validator while
# deploying tendermint with Kubernetes. It is nice to have bash so the users
# could execute bash commands.
RUN apk add --no-cache bash curl jq
RUN apk add --no-cache openssl && \
wget https://s3-us-west-2.amazonaws.com/tendermint/${TM_VERSION}/tendermint_linux_amd64.zip && \
echo "83f6bd52055ebc93434a68263c6666a4de41e0e543d0b5a06ad461262c460f4c tendermint_linux_amd64.zip" | sha256sum -c && \
unzip -d /bin tendermint_linux_amd64.zip && \
apk del openssl && \
rm -f tendermint_linux_amd64.zip
# Expose the data directory as a volume since there's mutable state in there
VOLUME $DATA_ROOT
USER $USER
ENV TMROOT $DATA_ROOT
# p2p port
EXPOSE 46656
# rpc port
EXPOSE 46657
# Run tendermint
CMD ["./run.sh"]
ENTRYPOINT ["tendermint"]
# By default you'll get the dummy app
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]

+35 -0  DOCKER/Dockerfile.develop

@ -0,0 +1,35 @@
FROM alpine:3.5
ENV DATA_ROOT /tendermint
ENV TMROOT $DATA_ROOT
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
RUN apk add --no-cache bash curl jq
ENV GOPATH /go
ENV PATH "$PATH:/go/bin"
RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
apk add --no-cache go build-base git && \
cd /go/src/github.com/tendermint/tendermint && \
git clone https://github.com/tendermint/tendermint . && \
git checkout develop && \
make get_vendor_deps && \
make install && \
glide cc && \
cd - && \
rm -rf /go/src/github.com/tendermint/tendermint && \
apk del go build-base git
VOLUME $DATA_ROOT
EXPOSE 46656
EXPOSE 46657
ENTRYPOINT ["tendermint"]
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]

+15 -0  DOCKER/Makefile

@ -0,0 +1,15 @@
build:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
push:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker push "tendermint/tendermint" "tendermint/tendermint:$TAG" "tendermint/tendermint:$TAG_NO_PATCH"
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .
push_develop:
docker push "tendermint/tendermint:develop"
.PHONY: build build_develop push push_develop

+46 -15  DOCKER/README.md

@ -1,24 +1,55 @@
# Docker
# Supported tags and respective `Dockerfile` links
Tendermint uses docker for deployment of testnets via the [mintnet](github.com/tendermint/mintnet) tool.
- `0.8.0`, `0.8`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)]()
For faster development iterations (ie. to avoid docker builds),
the dockerfile just sets up the OS, and tendermint is fetched/installed at runtime.
`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
For the deterministic docker builds used in testing, see the [tests directory](https://github.com/tendermint/tendermint/tree/master/test)
# Tendermint
# Build and run a docker image and container
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
These are notes for the dev team.
For more background, see the [introduction](https://tendermint.com/intro).
```
# Build base Docker image
# Make sure ./run.sh exists.
docker build -t tendermint/tmbase -f Dockerfile .
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
# How to use this image
## Start one instance of the Tendermint core with the `dummy` app
# Log into dockerhub
docker login
A very simple example of a built-in app and Tendermint core in one container.
# Push latest build to dockerhub
docker push tendermint/tmbase
```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint
```
## mintnet-kubernetes
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
# Supported Docker versions
This image is officially supported on Docker version 1.13.1.
Support for older versions (down to 1.6) is provided on a best-effort basis.
Please see [the Docker installation documentation](https://docs.docker.com/installation/) for details on how to upgrade your Docker daemon.
# License
View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
# User Feedback
## Issues
If you have any problems with or questions about this image, please contact us through a [GitHub](https://github.com/tendermint/tendermint/issues) issue. If the issue is related to a CVE, please check for [a `cve-tracker` issue on the `official-images` repository](https://github.com/docker-library/official-images/issues?q=label%3Acve-tracker) first.
You can also reach the image maintainers via [Slack](http://forum.tendermint.com:3000/).
## Contributing
You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.

+0 -10  DOCKER/run.sh

@ -1,10 +0,0 @@
#! /bin/bash
mkdir -p $GOPATH/src/$TMREPO
cd $GOPATH/src/$TMREPO
git clone https://$TMREPO.git .
git fetch
git reset --hard $TMHEAD
go get -d $TMREPO/cmd/tendermint
make
tendermint node --seeds="$TMSEEDS" --moniker="$TMNAME"

+46 -31  Makefile

@ -1,59 +1,74 @@
.PHONY: get_deps build all list_deps install
all: get_deps install test
GOTOOLS = \
github.com/mitchellh/gox \
github.com/Masterminds/glide
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMROOT = $${TMROOT:-$$HOME/.tendermint}
define NEWLINE
all: install test
endef
NOVENDOR = go list github.com/tendermint/tendermint/... | grep -v /vendor/
install: get_deps
go install github.com/tendermint/tendermint/cmd/tendermint
install: get_vendor_deps
@go install ./cmd/tendermint
build:
go build -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
go build -o build/tendermint ./cmd/tendermint
build_race:
go build -race -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
go build -race -o build/tendermint ./cmd/tendermint
# dist builds binaries for all platforms and packages them for distribution
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
test: build
go test `${NOVENDOR}`
test:
@echo "--> Running go test"
@go test $(PACKAGES)
test_race: build
go test -race `${NOVENDOR}`
test_race:
@echo "--> Running go test --race"
@go test -v -race $(PACKAGES)
test_integrations:
bash ./test/test.sh
@bash ./test/test.sh
test100: build
for i in {1..100}; do make test; done
test100:
@for i in {1..100}; do make test; done
draw_deps:
# requires brew install graphviz
go get github.com/hirokidaichi/goviz
goviz -i github.com/tendermint/tendermint/cmd/tendermint | dot -Tpng -o huge.png
goviz -i ./cmd/tendermint | dot -Tpng -o huge.png
list_deps:
go list -f '{{join .Deps "\n"}}' github.com/tendermint/tendermint/... | \
@go list -f '{{join .Deps "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
get_deps:
go get -d `${NOVENDOR}`
go list -f '{{join .TestImports "\n"}}' github.com/tendermint/tendermint/... | \
@echo "--> Running go get"
@go get -v -d $(PACKAGES)
@go list -f '{{join .TestImports "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go get
xargs go get -v -d
get_vendor_deps:
go get github.com/Masterminds/glide
rm -rf vendor/
glide install
get_vendor_deps: ensure_tools
@rm -rf vendor/
@echo "--> Running glide install"
@glide install
update_deps:
go get -d -u github.com/tendermint/tendermint/...
update_deps: tools
@echo "--> Updating dependencies"
@go get -d -u ./...
revision:
-echo `git rev-parse --verify HEAD` > $(TMROOT)/revision
-echo `git rev-parse --verify HEAD` >> $(TMROOT)/revision_history
tools:
go get -u -v $(GOTOOLS)
ensure_tools:
go get $(GOTOOLS)
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools

+6 -2  README.md

@ -27,13 +27,17 @@ To get started developing applications, see the [application developers guide](h
## Install
To download pre-built binaries, see our [downloads page](https://tendermint.com/intro/getting-started/download).
To install from source, you should be able to:
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
For more details (or if it fails), see the [install guide](https://tendermint.com/intro/getting-started/install).
For more details (or if it fails), see the [install guide](https://tendermint.com/docs/guides/install).
## Contributing
Yay open source! Please see our [contributing guidelines](https://tendermint.com/guides/contributing).
Yay open source! Please see our [contributing guidelines](https://tendermint.com/docs/guides/contributing).
## Resources


+13 -8  Vagrantfile

@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/trusty64"
config.vm.provider "virtualbox" do |v|
v.memory = 2048
v.memory = 3072
v.cpus = 2
end
@ -15,19 +15,24 @@ Vagrant.configure("2") do |config|
wget -qO- https://get.docker.com/ | sh
usermod -a -G docker vagrant
apt-get autoremove -y
curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
tar -xvf go1.7.linux-amd64.tar.gz
curl -O https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz
tar -xvf go1.8.linux-amd64.tar.gz
rm -rf /usr/local/go
mv go /usr/local
echo 'export PATH=$PATH:/usr/local/go/bin' >> /home/vagrant/.profile
rm -f go1.8.linux-amd64.tar.gz
mkdir -p /home/vagrant/go/bin
chown -R vagrant:vagrant /home/vagrant/go
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.profile
echo 'export PATH=$PATH:/usr/local/go/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
mkdir -p /home/vagrant/go/src/github.com/tendermint
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
su - vagrant -c 'curl https://glide.sh/get | sh'
su - vagrant -c 'cd /vagrant/ && glide install && make test'
chown -R vagrant:vagrant /home/vagrant/go
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_vendor_deps'
SHELL
end

+1 -1  blockchain/pool.go

@ -26,7 +26,7 @@ var peerTimeoutSeconds = time.Duration(15) // not const so we can override with
in sequence from peers that reported higher heights than ours.
Every so often we ask peers what height they're on so we can keep going.
Requests are continuously made for blocks of heigher heights until
Requests are continuously made for blocks of higher heights until
the limits. If most of the requests have no available peers, and we
are not at peer limits, we can probably switch to consensus reactor
*/


+28 -26  blockchain/reactor.go

@ -3,11 +3,10 @@ package blockchain
import (
"bytes"
"errors"
"fmt"
"reflect"
"time"
. "github.com/tendermint/go-common"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
@ -17,7 +16,9 @@ import (
)
const (
BlockchainChannel = byte(0x40)
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
defaultChannelCapacity = 100
defaultSleepIntervalMS = 500
trySyncIntervalMS = 100
@ -55,12 +56,13 @@ type BlockchainReactor struct {
evsw types.EventSwitch
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height -= 1 // XXX HACK, make this better
store.height-- // XXX HACK, make this better
}
if state.LastBlockHeight != store.Height() {
PanicSanity(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
}
requestsCh := make(chan BlockRequest, defaultChannelCapacity)
timeoutsCh := make(chan string, defaultChannelCapacity)
@ -83,6 +85,7 @@ func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy
return bcR
}
// OnStart implements BaseService
func (bcR *BlockchainReactor) OnStart() error {
bcR.BaseReactor.OnStart()
if bcR.fastSync {
@ -95,12 +98,13 @@ func (bcR *BlockchainReactor) OnStart() error {
return nil
}
// OnStop implements BaseService
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
// Implements Reactor
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
@ -111,19 +115,19 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
}
}
// Implements Reactor
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
// Send peer our state.
peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
// doing nothing, will try later in `poolRoutine`
}
}
// Implements Reactor
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// Remove peer from the pool.
bcR.pool.RemovePeer(peer.Key)
}
// Implements Reactor
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
@ -159,7 +163,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.Key, msg.Height)
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
log.Warn(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
@ -242,10 +246,10 @@ FOR_LOOP:
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, sm.MockMempool{})
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
bcR.state.Save()
}
@ -257,17 +261,13 @@ FOR_LOOP:
}
}
func (bcR *BlockchainReactor) BroadcastStatusResponse() error {
bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
return nil
}
// BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
return nil
}
// implements events.Eventable
// SetEventSwitch implements events.Eventable
func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
bcR.evsw = evsw
}
@ -282,6 +282,7 @@ const (
msgTypeStatusRequest = byte(0x21)
)
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{}
var _ = wire.RegisterInterface(
@ -292,6 +293,7 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest},
)
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
msgType = bz[0]
@ -299,7 +301,7 @@ func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
r := bytes.NewReader(bz)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
if err != nil && n != len(bz) {
err = errors.New("DecodeMessage() had bytes left over.")
err = errors.New("DecodeMessage() had bytes left over")
}
return
}
@ -311,7 +313,7 @@ type bcBlockRequestMessage struct {
}
func (m *bcBlockRequestMessage) String() string {
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
}
//-------------------------------------
@ -322,7 +324,7 @@ type bcBlockResponseMessage struct {
}
func (m *bcBlockResponseMessage) String() string {
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
@ -332,7 +334,7 @@ type bcStatusRequestMessage struct {
}
func (m *bcStatusRequestMessage) String() string {
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
@ -342,5 +344,5 @@ type bcStatusResponseMessage struct {
}
func (m *bcStatusResponseMessage) String() string {
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
}

+6 -6  blockchain/store.go

@ -64,12 +64,12 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
if r == nil {
return nil
}
meta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := 0; i < meta.PartsHeader.Total; i++ {
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
part := bs.LoadBlockPart(height, i)
bytez = append(bytez, part.Bytes...)
}
@ -101,11 +101,11 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
if r == nil {
return nil
}
meta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
}
return meta
return blockMeta
}
// The +2/3 and other Precommit-votes for block at `height`.
@ -154,8 +154,8 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
}
// Save block meta
meta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(meta)
blockMeta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(blockMeta)
bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts


+14 -19  circle.yml

@ -1,38 +1,33 @@
---
machine:
environment:
MACH_PREFIX: tendermint-test-mach
GOPATH: /home/ubuntu/.go_workspace
REPO: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
DOCKER_VERSION: 1.10.0
DOCKER_MACHINE_VERSION: 0.6.0
DOCKER_MACHINE_VERSION: 0.9.0
GOPATH: "$HOME/.go_project"
PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME"
PROJECT_PATH: "$PROJECT_PARENT_PATH/$CIRCLE_PROJECT_REPONAME"
hosts:
circlehost: 127.0.0.1
localhost: 127.0.0.1
checkout:
post:
- rm -rf $REPO
- mkdir -p $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME
- mv $HOME/$CIRCLE_PROJECT_REPONAME $REPO
# - git submodule sync
# - git submodule update --init # use submodules
dependencies:
override:
- echo $MACH_PREFIX $GOPATH $REPO $DOCKER_VERSION $DOCKER_MACHINE_VERSION
- curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | sudo bash -s -- $DOCKER_VERSION
- sudo curl -sSL -o /usr/bin/docker-machine https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-linux-x86_64; sudo chmod 0755 /usr/bin/docker-machine
- sudo start docker
- sudo curl -sSL -o /usr/bin/docker-machine "https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-`uname -s`-`uname -m`"; sudo chmod 0755 /usr/bin/docker-machine
- mkdir -p "$PROJECT_PARENT_PATH"
- ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH"
post:
- go version
- docker version
- docker-machine version
test:
override:
- "cd $REPO && set -o pipefail && make test_integrations | tee ~/test_integrations.log":
- cd "$PROJECT_PATH" && set -o pipefail && make test_integrations 2>&1 | tee test_integrations.log:
timeout: 1800
- "cp ~/test_integrations.log $CIRCLE_ARTIFACTS"
post:
- "cd $REPO && bash <(curl -s https://codecov.io/bash)"
- cd "$PROJECT_PATH" && mv test_integrations.log "${CIRCLE_ARTIFACTS}"
- cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt
- cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}"
- cd "$PROJECT_PATH" && cp test/logs/messages "${CIRCLE_ARTIFACTS}/docker_logs.txt"

+2 -10  cmd/tendermint/gen_validator.go

@ -8,16 +8,8 @@ import (
)
func gen_validator() {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
fmt.Printf(`Generated a new validator!
Paste the following JSON into your %v file
%v
`,
config.GetString("priv_validator_file"),
string(privValidatorJSONBytes),
)
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}

+24 -13  cmd/tendermint/init.go

@ -1,24 +1,35 @@
package main
import (
. "github.com/tendermint/go-common"
"os"
cmn "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
)
func init_files() {
privValidator := types.GenPrivValidator()
privValidator.SetFile(config.GetString("priv_validator_file"))
privValidator.Save()
privValFile := config.GetString("priv_validator_file")
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
privValidator := types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator.Save()
genDoc := types.GenesisDoc{
ChainID: Fmt("test-chain-%v", RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
PubKey: privValidator.PubKey,
Amount: 10,
}}
genFile := config.GetString("genesis_file")
genDoc.SaveAs(config.GetString("genesis_file"))
if _, err := os.Stat(genFile); os.IsNotExist(err) {
genDoc := types.GenesisDoc{
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
PubKey: privValidator.PubKey,
Amount: 10,
}}
log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
genDoc.SaveAs(genFile)
}
log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
} else {
log.Notice("Already initialized", "priv_validator", config.GetString("priv_validator_file"))
}
}

+25 -12  cmd/tendermint/main.go

@ -4,11 +4,10 @@ import (
"fmt"
"os"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-logger"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/version"
)
@ -21,11 +20,16 @@ func main() {
fmt.Println(`Tendermint
Commands:
node Run the tendermint node
show_validator Show this node's validator info
gen_validator Generate new validator keypair
probe_upnp Test UPnP functionality
version Show version info
init Initialize tendermint
node Run the tendermint node
show_validator Show this node's validator info
gen_validator Generate new validator keypair
probe_upnp Test UPnP functionality
replay <walfile> Replay messages from WAL
replay_console <walfile> Replay messages from WAL in a console
unsafe_reset_all (unsafe) Remove all the data and WAL, reset this node's validator
unsafe_reset_priv_validator (unsafe) Reset this node's validator
version Show version info
`)
return
}
@ -39,12 +43,20 @@ Commands:
switch args[0] {
case "node":
node.RunNode(config)
run_node(config)
case "replay":
if len(args) > 2 && args[1] == "console" {
node.RunReplayConsole(config, args[2])
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], false)
} else {
node.RunReplay(config, args[1])
fmt.Println("replay requires an argument (walfile)")
os.Exit(1)
}
case "replay_console":
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], true)
} else {
fmt.Println("replay_console requires an argument (walfile)")
os.Exit(1)
}
case "init":
init_files()
@ -61,6 +73,7 @@ Commands:
case "version":
fmt.Println(version.Version)
default:
Exit(Fmt("Unknown command %v\n", args[0]))
fmt.Printf("Unknown command %v\n", args[0])
os.Exit(1)
}
}

+3 -3  cmd/tendermint/reset_priv_validator.go

@ -6,15 +6,15 @@ import (
"github.com/tendermint/tendermint/types"
)
// NOTE: this is totally unsafe.
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func reset_all() {
reset_priv_validator()
os.RemoveAll(config.GetString("db_dir"))
os.RemoveAll(config.GetString("cs_wal_dir"))
os.Remove(config.GetString("cs_wal_file"))
}
// NOTE: this is totally unsafe.
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func reset_priv_validator() {
// Get PrivValidator


+59 -0  cmd/tendermint/run_node.go

@ -0,0 +1,59 @@
package main
import (
"io/ioutil"
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
)
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func run_node(config cfg.Config) {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
}
if genDoc.ChainID == "" {
Exit(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Create & start node
n := node.NewNodeDefault(config)
if _, err := n.Start(); err != nil {
Exit(Fmt("Failed to start node: %v", err))
} else {
log.Notice("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
}
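
The comment at the top of this new run_node.go points at the embedding pattern: import the node package and call node.NewNode with your own PrivValidator and proxy.ClientCreator instead of using node.NewNodeDefault. A hedged sketch of that pattern follows; the argument list of node.NewNode shown here is an assumption based on that comment and is not confirmed by this excerpt.
```
package main

import (
	cfg "github.com/tendermint/go-config"
	tmcfg "github.com/tendermint/tendermint/config/tendermint"
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// runCustomNode starts Tendermint in-process with a caller-supplied
// validator key and ABCI client creator, as the comment in run_node.go
// suggests. The node.NewNode signature used here is an assumption.
func runCustomNode(config cfg.Config, privVal *types.PrivValidator, clientCreator proxy.ClientCreator) {
	n := node.NewNode(config, privVal, clientCreator) // assumed signature
	if _, err := n.Start(); err != nil {
		panic(err)
	}
	n.RunForever()
}

func main() {
	// The stock path, mirroring run_node.go above.
	config := tmcfg.GetConfig("") // "" is assumed to fall back to $TMROOT / ~/.tendermint
	n := node.NewNodeDefault(config)
	if _, err := n.Start(); err != nil {
		panic(err)
	}
	n.RunForever()
	// An embedder would instead build a PrivValidator (possibly backed by
	// an external signer) and a proxy.ClientCreator wrapping an in-process
	// ABCI app, then call runCustomNode with them.
}
```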

+1 -1  config/tendermint/config.go

@ -72,7 +72,7 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetDefault("grpc_laddr", "")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)


+1 -1  config/tendermint_test/config.go

@ -86,7 +86,7 @@ func ResetConfig(localPath string) cfg.Config {
mapConfig.SetDefault("grpc_laddr", "tcp://0.0.0.0:36658")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)


+1 -1  consensus/byzantine_test.go

@ -242,7 +242,7 @@ func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !br.reactor.fastSync {
br.reactor.sendNewRoundStepMessage(peer)
br.reactor.sendNewRoundStepMessages(peer)
}
}
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {


+4 -4  consensus/common_test.go

@ -11,6 +11,8 @@ import (
"testing"
"time"
abcicli "github.com/tendermint/abci/client"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
@ -20,8 +22,6 @@ import (
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
abcicli "github.com/tendermint/abci/client"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
@ -320,7 +320,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
css[i].SetTimeoutTicker(tickerFunc())
}
@ -336,7 +336,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
privVal = privVals[i]


+8 -17  consensus/mempool_test.go

@ -5,9 +5,9 @@ import (
"testing"
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
)
@ -81,15 +81,12 @@ func TestRmBadTx(t *testing.T) {
// check for the tx
for {
time.Sleep(time.Second)
select {
case <-ch:
default:
txs := cs.mempool.Reap(1)
if len(txs) == 0 {
ch <- struct{}{}
}
txs := cs.mempool.Reap(1)
if len(txs) == 0 {
ch <- struct{}{}
return
}
}
}()
@ -114,6 +111,8 @@ func TestRmBadTx(t *testing.T) {
// CounterApplication that maintains a mempool state and resets it upon commit
type CounterApplication struct {
abci.BaseApplication
txCount int
mempoolTxCount int
}
@ -126,10 +125,6 @@ func (app *CounterApplication) Info() abci.ResponseInfo {
return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
}
func (app *CounterApplication) SetOption(key string, value string) (log string) {
return ""
}
func (app *CounterApplication) DeliverTx(tx []byte) abci.Result {
return runTx(tx, &app.txCount)
}
@ -160,7 +155,3 @@ func (app *CounterApplication) Commit() abci.Result {
return abci.NewResultOK(hash, "")
}
}
func (app *CounterApplication) Query(query []byte) abci.Result {
return abci.NewResultOK(nil, Fmt("Query is not supported"))
}

+16 -15  consensus/reactor.go

@ -127,7 +127,7 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !conR.fastSync {
conR.sendNewRoundStepMessage(peer)
conR.sendNewRoundStepMessages(peer)
}
}
@ -201,7 +201,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
BlockID: msg.BlockID,
Votes: ourVotes,
}})
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
@ -365,7 +364,7 @@ func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *
return
}
func (conR *ConsensusReactor) sendNewRoundStepMessage(peer *p2p.Peer) {
func (conR *ConsensusReactor) sendNewRoundStepMessages(peer *p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
@ -399,8 +398,9 @@ OUTER_LOOP:
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
}
}
@ -415,9 +415,9 @@ OUTER_LOOP:
log.Warn("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
} else if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
@ -425,7 +425,7 @@ OUTER_LOOP:
part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
if part == nil {
log.Warn("Could not load part", "index", index,
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
@ -435,8 +435,9 @@ OUTER_LOOP:
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
} else {
//log.Info("No parts to send in catch-up, sleeping")
@ -462,8 +463,9 @@ OUTER_LOOP:
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposal(rs.Proposal)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposal(rs.Proposal)
}
}
// ProposalPOL: lets peer know which POL votes we have so far.
// Peer must receive ProposalMessage first.
@ -806,13 +808,12 @@ func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
ps.ProposalBlockParts.SetIndex(index, true)
}
// Convenience function to send vote to peer.
// PickVoteToSend sends vote to peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
return true
return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
}
return false
}


+187 -200  consensus/replay.go

@ -1,24 +1,35 @@
package consensus
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// Functionality to replay blocks and messages on recovery from a crash.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
// which ultimately hands off the work to the WAL.
//-----------------------------------------
// recover from failure during consensus
// by replaying messages from the WAL
// Unmarshal and apply a single message to the consensus state
// as if it were received in receiveRoutine
// Lines that start with "#" are ignored.
@ -133,245 +144,221 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
return nil
}
//--------------------------------------------------------
// replay messages interactively or all at once
//--------------------------------------------------------------------------------
// Interactive playback
func (cs ConsensusState) ReplayConsole(file string) error {
return cs.replay(file, true)
// Parses marker lines of the form:
// #HEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
if len(parts) != 2 {
return -1, errors.New("Line did not have 2 parts")
}
i, err := strconv.Atoi(parts[1])
if err != nil {
return -1, errors.New("Failed to parse INFO: " + err.Error())
}
if height < i {
return 1, nil
} else if height == i {
return 0, nil
} else {
return -1, nil
}
}
}
// Full playback, with tests
func (cs ConsensusState) ReplayMessages(file string) error {
return cs.replay(file, false)
}
//----------------------------------------------
// Recover from failure during block processing
// by handshaking with the app to figure out where
// we were last and using the WAL to recover there
// replay all msgs or start the console
func (cs *ConsensusState) replay(file string, console bool) error {
if cs.IsRunning() {
return errors.New("cs is already running, cannot replay")
}
if cs.wal != nil {
return errors.New("cs wal is open, cannot replay")
}
type Handshaker struct {
config cfg.Config
state *sm.State
store types.BlockStore
cs.startForReplay()
nBlocks int // number of blocks applied to the state
}
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
func NewHandshaker(config cfg.Config, state *sm.State, store types.BlockStore) *Handshaker {
return &Handshaker{config, state, store, 0}
}
func (h *Handshaker) NBlocks() int {
return h.nBlocks
}
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
if err != nil {
return errors.New(Fmt("Error calling Info: %v", err))
}
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
appHash := res.LastBlockAppHash
log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)
// TODO: check version
// replay blocks up to the latest in the blockstore
_, err = h.ReplayBlocks(appHash, blockHeight, proxyApp)
if err != nil {
return errors.New(Fmt("Error on replay: %v", err))
}
log.Notice("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", appHash)
// TODO: (on restart) replay mempool
return nil
}
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// Returns the final AppHash or an error
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp proxy.AppConns) ([]byte, error) {
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
// First handle edge cases and constraints on the storeBlockHeight
if storeBlockHeight == 0 {
return appHash, h.checkAppHash(appHash)
} else if storeBlockHeight < appBlockHeight {
// the app should never be ahead of the store (but this is under app's control)
return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
PanicSanity(Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
PanicSanity(Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
// Now either store is equal to state, or one ahead.
// For each, consider all cases of where the app could be, given app <= store
if storeBlockHeight == stateBlockHeight {
// Tendermint ran Commit and saved the state.
// Either the app is asking for replay, or we're all synced up.
if appBlockHeight < storeBlockHeight {
// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, false)
} else if appBlockHeight == storeBlockHeight {
// We're good!
return appHash, h.checkAppHash(appHash)
}
} else if storeBlockHeight == stateBlockHeight+1 {
// We saved the block in the store but haven't updated the state,
// so we'll need to replay a block using the WAL.
if appBlockHeight < stateBlockHeight {
// the app is further behind than it should be, so replay blocks
// but leave the last block to go through the WAL
return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, true)
} else if appBlockHeight == stateBlockHeight {
// We haven't run Commit (both the state and app are one block behind),
// so run through consensus with the real app
log.Info("Replay last block using real app")
return h.replayLastBlock(proxyApp.Consensus())
} else if appBlockHeight == storeBlockHeight {
// We ran Commit, but didn't save the state, so run through consensus with mock app
mockApp := newMockProxyApp(appHash)
log.Info("Replay last block using mock app")
return h.replayLastBlock(mockApp)
}
}
PanicSanity("Should never happen")
return nil, nil
}
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, useReplayFunc bool) ([]byte, error) {
// App is further behind than it should be, so we need to replay blocks.
// We replay all blocks from appBlockHeight+1 to storeBlockHeight-1,
// and let the final block be replayed through ReplayBlocks.
// Note that we don't have an old version of the state,
// so we by-pass state validation using applyBlock here.
var appHash []byte
var err error
finalBlock := storeBlockHeight
if useReplayFunc {
finalBlock -= 1
}
for i := appBlockHeight + 1; i <= finalBlock; i++ {
log.Info("Applying block", "height", i)
block := h.store.LoadBlock(i)
appHash, err = sm.ApplyBlock(proxyApp.Consensus(), block)
if err != nil {
return nil, err
}
h.nBlocks += 1
}
if useReplayFunc {
// sync the final block
return h.ReplayBlocks(appHash, finalBlock, proxyApp)
}
return appHash, h.checkAppHash(appHash)
}
// Replay the last block through the consensus and return the AppHash from after Commit.
func (h *Handshaker) replayLastBlock(proxyApp proxy.AppConnConsensus) ([]byte, error) {
mempool := types.MockMempool{}
cs := NewConsensusState(h.config, h.state, proxyApp, h.store, mempool)
evsw := types.NewEventSwitch()
evsw.Start()
defer evsw.Stop()
cs.SetEventSwitch(evsw)
newBlockCh := subscribeToEvent(evsw, "consensus-replay", types.EventStringNewBlock(), 1)
// run through the WAL, commit new block, stop
cs.Start()
<-newBlockCh // TODO: use a timeout and return err?
cs.Stop()
h.nBlocks += 1
return cs.state.AppHash, nil
}
func (h *Handshaker) checkAppHash(appHash []byte) error {
if !bytes.Equal(h.state.AppHash, appHash) {
panic(errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error())
return nil
}
return nil
}
//--------------------------------------------------------------------------------
// Parses marker lines of the form:
// #HEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
if len(parts) != 2 {
return -1, errors.New("Line did not have 2 parts")
}
i, err := strconv.Atoi(parts[1])
if err != nil {
return -1, errors.New("Failed to parse INFO: " + err.Error())
}
if height < i {
return 1, nil
} else if height == i {
return 0, nil
} else {
return -1, nil
}
}
}
func newMockProxyApp(appHash []byte) proxy.AppConnConsensus {
clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{appHash: appHash})
cli, _ := clientCreator.NewABCIClient()
return proxy.NewAppConnConsensus(cli)
}
type mockProxyApp struct {
abci.BaseApplication
appHash []byte
}
func (mock *mockProxyApp) Commit() abci.Result {
return abci.NewResultOK(mock.appHash, "")
}

+ 271
- 0
consensus/replay_file.go View File

@ -0,0 +1,271 @@
package consensus
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
bc "github.com/tendermint/tendermint/blockchain"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
//--------------------------------------------------------
// replay messages interactively or all at once
func RunReplayFile(config cfg.Config, walFile string, console bool) {
consensusState := newConsensusStateForReplay(config)
if err := consensusState.ReplayFile(walFile, console); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
}
// Replay msgs in file or start the console
func (cs *ConsensusState) ReplayFile(file string, console bool) error {
if cs.IsRunning() {
return errors.New("cs is already running, cannot replay")
}
if cs.wal != nil {
return errors.New("cs wal is open, cannot replay")
}
cs.startForReplay()
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0666)
if err != nil {
return err
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
defer pb.fp.Close()
var nextN int // apply N msgs in a row
for pb.scanner.Scan() {
if nextN == 0 && console {
nextN = pb.replayConsoleLoop()
}
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
}
if nextN > 0 {
nextN -= 1
}
pb.count += 1
}
return nil
}
//------------------------------------------------
// playback manager
type playback struct {
cs *ConsensusState
fp *os.File
scanner *bufio.Scanner
count int // how many lines/msgs into the file are we
// replays can be reset to beginning
fileName string // so we can close/reopen the file
genesisState *sm.State // so the replay session knows where to restart from
}
func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.State) *playback {
return &playback{
cs: cs,
fp: fp,
fileName: fileName,
genesisState: genState,
scanner: bufio.NewScanner(fp),
}
}
// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
pb.cs.Wait()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
newCS.startForReplay()
pb.fp.Close()
fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666)
if err != nil {
return err
}
pb.fp = fp
pb.scanner = bufio.NewScanner(fp)
count = pb.count - count
log.Notice(Fmt("Reseting from %d to %d", pb.count, count))
pb.count = 0
pb.cs = newCS
for i := 0; pb.scanner.Scan() && i < count; i++ {
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
}
pb.count += 1
}
return nil
}
func (cs *ConsensusState) startForReplay() {
// don't want to start full cs
cs.BaseService.OnStart()
log.Warn("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
}
}
}()*/
}
// console function for parsing input and running commands
func (pb *playback) replayConsoleLoop() int {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
Exit("input is too long")
} else if err != nil {
Exit(err.Error())
}
tokens := strings.Split(string(line), " ")
if len(tokens) == 0 {
continue
}
switch tokens[0] {
case "next":
// "next" -> replay next message
// "next N" -> replay next N messages
if len(tokens) == 1 {
return 0
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("next takes an integer argument")
} else {
return i
}
}
case "back":
// "back" -> go back one message
// "back N" -> go back N messages
// NOTE: "back" is not supported in the state machine design,
// so we restart and replay up to
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(pb.cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
if len(tokens) == 1 {
pb.replayReset(1, newStepCh)
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("back takes an integer argument")
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
pb.replayReset(i, newStepCh)
}
}
case "rs":
// "rs" -> print entire round state
// "rs short" -> print height/round/step
// "rs <field>" -> print another field of the round state
rs := pb.cs.RoundState
if len(tokens) == 1 {
fmt.Println(rs)
} else {
switch tokens[1] {
case "short":
fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step)
case "validators":
fmt.Println(rs.Validators)
case "proposal":
fmt.Println(rs.Proposal)
case "proposal_block":
fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort())
case "locked_round":
fmt.Println(rs.LockedRound)
case "locked_block":
fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
case "votes":
fmt.Println(rs.Votes.StringIndented(" "))
default:
fmt.Println("Unknown option", tokens[1])
}
}
case "n":
fmt.Println(pb.count)
}
}
return 0
}
//--------------------------------------------------------------------------------
// convenience for replay mode
func newConsensusStateForReplay(config cfg.Config) *ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
// Create proxyAppConn connection (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), NewHandshaker(config, state, blockStore))
_, err := proxyApp.Start()
if err != nil {
Exit(Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
}
mempool := mempl.NewMempool(config, proxyApp.Mempool())
consensusState := NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}

+ 437
- 27
consensus/replay_test.go View File

@ -1,7 +1,10 @@
package consensus
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
@ -11,8 +14,14 @@ import (
"github.com/tendermint/tendermint/config/tendermint_test"
. "github.com/tendermint/go-common"
"github.com/tendermint/abci/example/dummy"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@ -20,14 +29,23 @@ func init() {
config = tendermint_test.ResetConfig("consensus_replay_test")
}
// TODO: these tests ensure we can always recover from any state of the wal,
// assuming it comes with a correct related state for the priv_validator.json.
// It would be better to verify explicitly which states we can recover from without the wal
// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!
var data_dir = path.Join(cmn.GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")
//------------------------------------------------------------------------------------------
// WAL Tests
// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
var data_dir = path.Join(GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")
// the priv validator changes step at these lines for a block with 1 val and 1 part
var baseStepChanges = []int{3, 6, 8}
@ -50,7 +68,7 @@ type testCase struct {
func newTestCase(name string, stepChanges []int) *testCase {
if len(stepChanges) != 3 {
panic(Fmt("a full wal has 3 step changes! Got array %v", stepChanges))
panic(cmn.Fmt("a full wal has 3 step changes! Got array %v", stepChanges))
}
return &testCase{
name: name,
@ -85,18 +103,19 @@ func readWAL(p string) string {
func writeWAL(walMsgs string) string {
tempDir := os.TempDir()
walDir := tempDir + "/wal" + RandStr(12)
walDir := path.Join(tempDir, "/wal"+cmn.RandStr(12))
walFile := path.Join(walDir, "wal")
// Create WAL directory
err := EnsureDir(walDir, 0700)
err := cmn.EnsureDir(walDir, 0700)
if err != nil {
panic(err)
}
// Write the needed WAL to file
err = WriteFile(walDir+"/wal", []byte(walMsgs), 0600)
err = cmn.WriteFile(walFile, []byte(walMsgs), 0600)
if err != nil {
panic(err)
}
return walDir
return walFile
}
func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
@ -104,14 +123,14 @@ func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
select {
case <-newBlockCh:
case <-after:
panic(Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i))
panic(cmn.Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i))
}
}
func runReplayTest(t *testing.T, cs *ConsensusState, walDir string, newBlockCh chan interface{},
func runReplayTest(t *testing.T, cs *ConsensusState, walFile string, newBlockCh chan interface{},
thisCase *testCase, i int) {
cs.config.Set("cs_wal_dir", walDir)
cs.config.Set("cs_wal_file", walFile)
cs.Start()
// Wait to make a new block.
// This is just a signal that we haven't halted; its not something contained in the WAL itself.
@ -137,7 +156,7 @@ func toPV(pv PrivValidator) *types.PrivValidator {
func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
fmt.Println("-------------------------------------")
log.Notice(Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))
log.Notice(cmn.Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))
lineStep := nLines
if crashAfter {
@ -148,7 +167,7 @@ func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*Consensu
lastMsg := split[nLines]
// we write those lines up to (not including) one with the signature
walDir := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
walFile := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
cs := fixedConsensusStateDummy()
@ -160,7 +179,7 @@ func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*Consensu
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
return cs, newBlockCh, lastMsg, walDir
return cs, newBlockCh, lastMsg, walFile
}
func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage {
@ -177,12 +196,12 @@ func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage {
// Test the log at every iteration, and set the privVal last step
// as if the log was written after signing, before the crash
func TestReplayCrashAfterWrite(t *testing.T) {
func TestWALCrashAfterWrite(t *testing.T) {
for _, thisCase := range testCases {
split := strings.Split(thisCase.log, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, walDir := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, walDir, newBlockCh, thisCase, i+1)
cs, newBlockCh, _, walFile := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1)
}
}
}
@ -191,27 +210,27 @@ func TestReplayCrashAfterWrite(t *testing.T) {
// Test the log as if we crashed after signing but before writing.
// This relies on privValidator.LastSignature being set
func TestReplayCrashBeforeWritePropose(t *testing.T) {
func TestWALCrashBeforeWritePropose(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walDir := setupReplayTest(thisCase, lineNum, false)
cs, newBlockCh, proposalMsg, walFile := setupReplayTest(thisCase, lineNum, false)
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
}
}
func TestReplayCrashBeforeWritePrevote(t *testing.T) {
func TestWALCrashBeforeWritePrevote(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal())
}
}
func TestReplayCrashBeforeWritePrecommit(t *testing.T) {
func TestWALCrashBeforeWritePrecommit(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka())
}
@ -219,7 +238,7 @@ func TestReplayCrashBeforeWritePrecommit(t *testing.T) {
func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walDir := setupReplayTest(thisCase, lineNum, false)
cs, newBlockCh, voteMsg, walFile := setupReplayTest(thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
@ -227,5 +246,396 @@ func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum in
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
toPV(cs.privValidator).LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
}
//------------------------------------------------------------------------------------------
// Handshake Tests
var (
NUM_BLOCKS = 6 // number of blocks in the test_data/many_blocks.cswal
mempool = types.MockMempool{}
testPartSize int
)
//---------------------------------------
// Test handshake/replay
// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
var modes = []uint{0, 1, 2}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 0, m)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 1, m)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS-1, m)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS, m)
}
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := tendermint_test.ResetConfig("proxy_test_")
// copy the many_blocks file
walBody, err := cmn.ReadFile(path.Join(data_dir, "many_blocks.cswal"))
if err != nil {
t.Fatal(err)
}
walFile := writeWAL(string(walBody))
config.Set("cs_wal_file", walFile)
privVal := types.LoadPrivValidator(config.GetString("priv_validator_file"))
testPartSize = config.GetInt("block_part_size")
wal, err := NewWAL(walFile, false)
if err != nil {
t.Fatal(err)
}
chain, commits, err := makeBlockchainFromWAL(wal)
if err != nil {
t.Fatalf(err.Error())
}
state, store := stateAndStore(config, privVal.PubKey)
store.chain = chain
store.commits = commits
// run the chain through state.ApplyBlock to build up the tendermint state
latestAppHash := buildTMStateFromChain(config, state, chain, mode)
// make a new client creator
dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2"))
clientCreator2 := proxy.NewLocalClientCreator(dummyApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(config, clientCreator2, nil)
state, _ := stateAndStore(config, privVal.PubKey)
buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode)
}
// now start the app using the handshake - it should sync
handshaker := NewHandshaker(config, state, store)
proxyApp := proxy.NewAppConns(config, clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync()
if err != nil {
t.Fatal(err)
}
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
}
expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync += 1
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync += 1
}
if handshaker.NBlocks() != expectedBlocksToSync {
t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
}
}
func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
panic(err)
}
}
func buildAppStateFromChain(proxyApp proxy.AppConns,
state *sm.State, chain []*types.Block, nBlocks int, mode uint) {
// start a new app without handshake, play nBlocks blocks
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
applyBlock(state, block, proxyApp)
}
case 1, 2:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
applyBlock(state, block, proxyApp)
}
if mode == 2 {
// update the dummy height and apphash
// as if we ran commit but not
applyBlock(state, chain[nBlocks-1], proxyApp)
}
}
}
func buildTMStateFromChain(config cfg.Config, state *sm.State, chain []*types.Block, mode uint) []byte {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
proxyApp := proxy.NewAppConns(config, clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
var latestAppHash []byte
switch mode {
case 0:
// sync right up
for _, block := range chain {
applyBlock(state, block, proxyApp)
}
latestAppHash = state.AppHash
case 1, 2:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
applyBlock(state, block, proxyApp)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
stateCopy := state.Copy()
applyBlock(stateCopy, chain[len(chain)-1], proxyApp)
latestAppHash = stateCopy.AppHash
}
return latestAppHash
}
//--------------------------
// utils for making blocks
func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
// Search for height marker
gr, found, err := wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(1))
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
}
defer gr.Close()
log.Notice("Build a blockchain by reading from the WAL")
var blockParts *types.PartSet
var blocks []*types.Block
var commits []*types.Commit
for {
line, err := gr.ReadLine()
if err != nil {
if err == io.EOF {
break
} else {
return nil, nil, err
}
}
piece, err := readPieceFromWAL([]byte(line))
if err != nil {
return nil, nil, err
}
if piece == nil {
continue
}
switch p := piece.(type) {
case *types.PartSetHeader:
// if its not the first one, we have a full block
if blockParts != nil {
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
blocks = append(blocks, block)
}
blockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := blockParts.AddPart(p, false)
if err != nil {
return nil, nil, err
}
case *types.Vote:
if p.Type == types.VoteTypePrecommit {
commit := &types.Commit{
BlockID: p.BlockID,
Precommits: []*types.Vote{p},
}
commits = append(commits, commit)
}
}
}
// grab the last block too
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
blocks = append(blocks, block)
return blocks, commits, nil
}
func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
// Skip over empty and meta lines
if len(msgBytes) == 0 || msgBytes[0] == '#' {
return nil, nil
}
var err error
var msg TimedWALMessage
wire.ReadJSON(&msg, msgBytes, &err)
if err != nil {
fmt.Println("MsgBytes:", msgBytes, string(msgBytes))
return nil, fmt.Errorf("Error reading json data: %v", err)
}
// for logging
switch m := msg.Msg.(type) {
case msgInfo:
switch msg := m.Msg.(type) {
case *ProposalMessage:
return &msg.Proposal.BlockPartsHeader, nil
case *BlockPartMessage:
return msg.Part, nil
case *VoteMessage:
return msg.Vote, nil
}
}
return nil, nil
}
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(chainID string, privVal *types.PrivValidator, height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privVal.Address,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privVal.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, chainID string, nBlocks int, privVal *types.PrivValidator, proxyApp proxy.AppConns, state *sm.State) (blockchain []*types.Block, commits []*types.Commit) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(chainID, privVal, i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
blockchain = append(blockchain, block)
commits = append(commits, lastCommit)
}
return blockchain, commits
}
// fresh state and mock store
func stateAndStore(config cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
return sm.MakeGenesisState(stateDB, &types.GenesisDoc{
ChainID: config.GetString("chain_id"),
Validators: []types.GenesisValidator{
types.GenesisValidator{pubKey, 10000, "test"},
},
AppHash: nil,
}), NewMockBlockStore(config)
}
//----------------------------------
// mock block store
type mockBlockStore struct {
config cfg.Config
chain []*types.Block
commits []*types.Commit
}
// TODO: NewBlockStore(db.NewMemDB) ...
func NewMockBlockStore(config cfg.Config) *mockBlockStore {
return &mockBlockStore{config, nil, nil}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.GetInt("block_part_size")).Header()},
Header: block.Header,
}
}
func (bs *mockBlockStore) LoadBlockPart(height int, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit(height int) *types.Commit {
return bs.commits[height-1]
}

+ 24
- 47
consensus/state.go View File

@ -4,7 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io"
"path"
"reflect"
"sync"
"time"
@ -14,8 +14,6 @@ import (
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -124,6 +122,8 @@ func (rs RoundStepType) String() string {
//-----------------------------------------------------------------------------
// Immutable when returned from ConsensusState.GetRoundState()
// TODO: Actually, only the top pointer is copied,
// so access to field pointers is still racey
type RoundState struct {
Height int // Height we are working on
Round int
@ -226,8 +226,8 @@ type ConsensusState struct {
config cfg.Config
proxyAppConn proxy.AppConnConsensus
blockStore *bc.BlockStore
mempool *mempl.Mempool
blockStore types.BlockStore
mempool types.Mempool
privValidator PrivValidator // for signing votes
@ -255,7 +255,7 @@ type ConsensusState struct {
done chan struct{}
}
func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore *bc.BlockStore, mempool *mempl.Mempool) *ConsensusState {
func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
cs := &ConsensusState{
config: config,
proxyAppConn: proxyAppConn,
@ -342,35 +342,18 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
func (cs *ConsensusState) OnStart() error {
cs.BaseService.OnStart()
walDir := cs.config.GetString("cs_wal_dir")
err := EnsureDir(walDir, 0700)
walFile := cs.config.GetString("cs_wal_file")
err := EnsureDir(path.Dir(walFile), 0700)
if err != nil {
log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
return err
}
err = cs.OpenWAL(walDir)
err = cs.OpenWAL(walFile)
if err != nil {
log.Error("Error loading ConsensusState wal", "error", err.Error())
return err
}
// If the latest block was applied in the abci handshake,
// we may not have written the current height to the wal,
// so check here and write it if not found.
// TODO: remove this and run the handhsake/replay
// through the consensus state with a mock app
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(cs.Height))
if (err == io.EOF || !found) && cs.Step == RoundStepNewHeight {
log.Warn("Height not found in wal. Writing new height", "height", cs.Height)
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
} else if err != nil {
return err
}
if gr != nil {
gr.Close()
}
// we need the timeoutRoutine for replay so
// we don't block on the tick chan.
// NOTE: we will get a build up of garbage go routines
@ -420,10 +403,10 @@ func (cs *ConsensusState) Wait() {
}
// Open file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walDir string) (err error) {
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
wal, err := NewWAL(walDir, cs.config.GetBool("cs_wal_light"))
wal, err := NewWAL(walFile, cs.config.GetBool("cs_wal_light"))
if err != nil {
return err
}
@ -569,7 +552,6 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
// Reset fields based on state.
validators := state.Validators
height := state.LastBlockHeight + 1 // Next desired block height
lastPrecommits := (*types.VoteSet)(nil)
if cs.CommitRound > -1 && cs.Votes != nil {
if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
@ -578,6 +560,9 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
}
// Next desired block height
height := state.LastBlockHeight + 1
// RoundState fields
cs.updateHeight(height)
cs.updateRoundStep(0, RoundStepNewHeight)
@ -622,11 +607,6 @@ func (cs *ConsensusState) newStep() {
//-----------------------------------------
// the main go routines
// a nice idea but probably more trouble than its worth
func (cs *ConsensusState) stopTimer() {
cs.timeoutTicker.Stop()
}
// receiveRoutine handles messages which may cause state transitions.
// it's argument (n) is the number of messages to process before exiting - use 0 to run forever
// It keeps the RoundState and is the only thing that updates it.
@ -765,7 +745,6 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
if now := time.Now(); cs.StartTime.After(now) {
log.Warn("Need to set a buffer and log.Warn() here for sanity.", "startTime", cs.StartTime, "now", now)
}
// cs.stopTimer()
log.Notice(Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
@ -827,10 +806,10 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
return
}
if !bytes.Equal(cs.Validators.Proposer().Address, cs.privValidator.GetAddress()) {
log.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
if !bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) {
log.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
} else {
log.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
log.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.decideProposal(height, round)
}
@ -945,8 +924,6 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// TODO: catchup event?
}
// cs.stopTimer()
log.Info(Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Sign and broadcast vote as necessary
@ -1020,8 +997,6 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
return
}
// cs.stopTimer()
log.Info(Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
@ -1185,13 +1160,13 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) {
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
log.Warn("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
log.Warn("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
return
}
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
log.Warn("Attempt to finalize failed. We don't have the commit block.")
log.Warn("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
return
}
// go
@ -1235,7 +1210,8 @@ func (cs *ConsensusState) finalizeCommit(height int) {
seenCommit := precommits.MakeCommit()
cs.blockStore.SaveBlock(block, blockParts, seenCommit)
} else {
log.Warn("Why are we finalizeCommitting a block height we already have?", "height", block.Height)
// Happens during replay if we already saved the block but didn't commit
log.Info("Calling finalizeCommit on already stored block", "height", block.Height)
}
fail.Fail() // XXX
@ -1250,7 +1226,8 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// NOTE: the block.AppHash wont reflect these txs until the next block
err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
if err != nil {
// TODO!
log.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "error", err)
return
}
fail.Fail() // XXX
@ -1306,7 +1283,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
}
// Verify signature
if !cs.Validators.Proposer().PubKey.VerifyBytes(types.SignBytes(cs.state.ChainID, proposal), proposal.Signature) {
if !cs.Validators.GetProposer().PubKey.VerifyBytes(types.SignBytes(cs.state.ChainID, proposal), proposal.Signature) {
return ErrInvalidProposalSignature
}


+ 3
- 3
consensus/state_test.go View File

@ -65,7 +65,7 @@ func TestProposerSelection0(t *testing.T) {
<-newRoundCh
// lets commit a block and ensure proposer for the next height is correct
prop := cs1.GetRoundState().Validators.Proposer()
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
}
@ -79,7 +79,7 @@ func TestProposerSelection0(t *testing.T) {
// wait for new round so next validator is set
<-newRoundCh
prop = cs1.GetRoundState().Validators.Proposer()
prop = cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[1].Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
}
@ -100,7 +100,7 @@ func TestProposerSelection2(t *testing.T) {
// everyone just votes nil. we get a new proposer each round
for i := 0; i < len(vss); i++ {
prop := cs1.GetRoundState().Validators.Proposer()
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
}


+ 48
- 3
consensus/test_data/build.sh View File

@ -1,12 +1,13 @@
#! /bin/bash
# XXX: removes tendermint dir
cd $GOPATH/src/github.com/tendermint/tendermint
# specify a dir to copy
# NOTE: eventually we should replace with `tendermint init --test`
# TODO: eventually we should replace with `tendermint init --test`
DIR=$HOME/.tendermint_test/consensus_state_test
# XXX: remove tendermint dir
rm -rf $HOME/.tendermint
cp -r $DIR $HOME/.tendermint
@ -18,6 +19,7 @@ function reset(){
reset
# empty block
function empty_block(){
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
@ -28,21 +30,40 @@ killall tendermint
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
reset
}
# many blocks
function many_blocks(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 7
killall tendermint
kill -9 $PID
sed '/HEIGHT: 7/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/many_blocks.cswal
reset
}
# small block 1
function small_block1(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
sleep 10
killall tendermint
kill -9 $PID
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal
reset
}
# small block 2 (part size = 512)
function small_block2(){
echo "" >> ~/.tendermint/config.toml
echo "block_part_size = 512" >> ~/.tendermint/config.toml
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
@ -55,4 +76,28 @@ kill -9 $PID
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal
reset
}
case "$1" in
"small_block1")
small_block1
;;
"small_block2")
small_block2
;;
"empty_block")
empty_block
;;
"many_blocks")
many_blocks
;;
*)
small_block1
small_block2
empty_block
many_blocks
esac

+ 65
- 0
consensus/test_data/many_blocks.cswal
File diff suppressed because it is too large
View File


+ 2
- 2
consensus/wal.go View File

@ -40,8 +40,8 @@ type WAL struct {
light bool // ignore block parts
}
func NewWAL(walDir string, light bool) (*WAL, error) {
group, err := auto.OpenGroup(walDir + "/wal")
func NewWAL(walFile string, light bool) (*WAL, error) {
group, err := auto.OpenGroup(walFile)
if err != nil {
return nil, err
}


+ 16
- 0
docs/architecture/ABCI.md View File

@ -0,0 +1,16 @@
# ABCI
ABCI is an interface between the consensus/blockchain engine known as tendermint, and the application-specific business logic, known as an ABCi app.
The tendermint core should run unchanged for all apps. Each app can customize the supported transactions, the queries, even the validator sets and how to handle staking / slashing of stake. This customization is achieved by implementing the ABCi app to send the proper information to the tendermint engine to perform as directed.
To understand this decision better, think of the design of the tendermint engine.
* A blockchain is simply consensus on a unique global ordering of events.
* This consensus can efficiently be implemented using BFT and PoS
* This code can be generalized to easily support a large number of blockchains
* The block-chain specific code, the interpretation of the individual events, can be implemented by a 3rd party app without touching the consensus engine core
* Use an efficient, language-agnostic layer to implement this (ABCi)
Bucky, please make this doc real.
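To make the customization point concrete, below is a minimal sketch of an ABCi app in Go. It only uses the abci names that already appear elsewhere in this PR (`BaseApplication`, `Result`, `NewResultOK`); the `countApp` type and its counting logic are invented purely for illustration and are not part of this change.
```
package example

import (
	abci "github.com/tendermint/abci/types"
)

// countApp decides what a transaction means: here every tx just bumps a counter.
type countApp struct {
	abci.BaseApplication
	n int
}

// DeliverTx is where the app interprets and applies a transaction.
func (app *countApp) DeliverTx(tx []byte) abci.Result {
	app.n += 1
	return abci.NewResultOK(nil, "")
}

// Commit returns the app hash that tendermint carries into the next block header.
func (app *countApp) Commit() abci.Result {
	return abci.NewResultOK([]byte{byte(app.n)}, "")
}
```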

+ 16
- 0
docs/architecture/README.md View File

@ -0,0 +1,16 @@
# Architecture Decision Records
This is a location to record all high-level architecture decisions in the tendermint project. Not the implementation details, but the reasoning that happened. This should be referred to for guidance on the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
This is like our guide and mentor when Jae and Bucky are offline.... The concept comes from a [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t) that resonated among the team when Anton shared it.
Each section of the code can have its own markdown file in this directory; please add a link to it in the readme.
## Sections
* [ABCI](./ABCI.md)
* [go-merkle / merkleeyes](./merkle.md)
* [Frey's thoughts on the data store](./merkle-frey.md)
* basecoin
* tendermint core (multiple sections)
* ???

+ 240
- 0
docs/architecture/merkle-frey.md View File

@ -0,0 +1,240 @@
# Merkle data stores - Frey's proposal
## TL;DR
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an app hash to the consensus engine, as well as a full proof to any client.
This is equivalent to building a database, and I would propose designing it from the API first, then looking at how to implement this (or make an adapter from the API to existing implementations). Once we agree on the functionality and the interface, we can implement the API bindings, and then work on building adapters to existing merkle-ized data stores, or modifying the stores to support this interface.
We need to consider the API (both in-process and over the network), language bindings, maintaining handles to old state (and garbage collecting), persistence, security, providing merkle proofs, and general key-value store operations. To stay consistent with the blockchain's "single global order of operations", this data store should only allow one connection at a time to have write access.
## Overview
* **State**
* There are two concepts of state, "committed state" and "working state"
* The working state is only accessible from the ABCi app, allows writing, but does not need to support proofs.
* When we commit the "working state", it becomes a new "committed state" and has an immutable root hash, provides proofs, and can be exposed to external clients.
* **Transactions**
* The database always allows creating a read-only transaction at the last "committed state", this transaction can serve read queries and proofs.
* The database maintains all data to serve these read transactions until they are closed by the client (or time out). This allows the client(s) to determine how much old info is needed
* The database can only support *at most* one writable transaction at a time. This makes it easy to enforce serializability, and attempting to start a second writable transaction may trigger a panic.
* **Functionality**
* It must support efficient key-value operations (get/set/delete)
* It must support returning merkle proofs for any "committed state"
* It should support range queries on subsets of the key space if possible (ie. if the db doesn't hash keys)
* It should also support listening to changes to a desired key via pub-sub or similar method, so I can quickly notify you on a change to your balance without constant polling.
* It may support other db-specific query types as an extension to this interface, as long as all specified actions maintain their meaning.
* **Interface**
* This interface should be domain-specific - ie. designed just for this use case
* It should present a simple go interface for embedding the data store in-process
* It should create a gRPC/protobuf API for calling from any client
* It should provide and maintain client adapters from our in-process interface to gRPC client calls for at least golang and Java (maybe more languages?)
* It should provide and maintain server adapters from our gRPC calls to the in-process interface for golang at least (unless there is another server we wish to support)
* **Persistence**
* It must support atomic persistence upon committing a new block. That is, upon crash recovery, the state is guaranteed to represent the state at the end of a complete block (along with a note of which height it was).
* It must delay deletion of old data as long as there are open read-only transactions referring to it, thus we must maintain some sort of WAL to keep track of pending cleanup.
* When a transaction is closed, or when we recover from a crash, it should clean up all no longer needed data to avoid memory/storage leaks.
* **Security and Auth**
* If we allow connections over gRPC, we must consider these issues and allow both encryption (SSL), and some basic auth rules to prevent undesired access to the DB
* This is client-specific and does not need to be supported in the in-process, embedded version.
## Details
Here we go more in-depth in each of the sections, explaining the reasoning and more details on the desired behavior. This document is only the high-level architecture and should support multiple implementations. When building out a specific implementation, a similar document should be provided for that repo, showing how it implements these concepts, and details about memory usage, storage, efficiency, etc.
### State
The current ABCi interface avoids this question a bit and that has brought confusion. If I use `merkleeyes` to store data, which state is returned from `Query`? The current "working" state, which I would like to refer to in my ABCi application? Or the last committed state, which I would like to return to a client's query? Or an old state, which I may select based on height?
Right now, `merkleeyes` implements `Query` like a normal ABCi app and only returns committed state, which has led to problems and confusion. Thus, we need to be explicit about which state we want to view. Each viewer can then specify which state it wants to view. This allows the app to query the working state in DeliverTx, but the committed state in Query.
We can easily provide two global references for "last committed" and "current working" states. However, if we want to also allow querying of older commits... then we need some way to keep track of which ones are still in use, so we can garbage collect the unneeded ones. There is a non-trivial overhead in holding references to all past states, but also a hard-coded solution (hold onto the last 5 commits) may not support all clients. We should let the client define this somehow.
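To make the two kinds of state concrete, here is a minimal sketch in terms of the strawman interfaces from the "Go Interface" section below. It is not finalized code; the `kvModel` helper, the `example` function, and the `account:alice` key are invented for the illustration.
```
// kvModel is a hypothetical helper implementing the Model interface below.
type kvModel struct{ K, V []byte }

func (m kvModel) Key() []byte   { return m.K }
func (m kvModel) Value() []byte { return m.V }

func example(db DB) error {
	// "committed state": read-only, stable root hash, can serve proofs
	snap := db.Snapshot()
	defer snap.Close() // release the reference so old state can be garbage collected
	committedRoot := snap.Hash()

	// "working state": only visible through the single open write transaction
	tx, err := db.Begin()
	if err != nil {
		return err // someone else already holds the write transaction
	}
	if err := tx.Set(kvModel{[]byte("account:alice"), []byte("100")}); err != nil {
		tx.Rollback()
		return err
	}

	// the snapshot still answers queries and proofs against committedRoot;
	// it does not see the uncommitted write above
	_, _, _ = snap.Prove([]byte("account:alice"))
	_ = committedRoot

	// Commit turns the working state into the new committed state
	return tx.Commit()
}
```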
### Transactions
Transactions (in the typical database sense) are a clean and established solution to this issue. We can look at the [isolation levels](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable) which attempt to provide us things like "repeatable reads". That means if we open a transaction, and query some data 100 times while other processes are writing to the db, we get the same result each time. This transaction has a reference to its own local state from the time the transaction started. (We are referring to the highest isolation levels here, which correlate well with the blockchain use case.)
If we implement a read-only transaction as a reference to state at the time of creation of that transaction, we can then hold these references to various snapshots, one per block that we are interested in, and allow the client to multiplex queries and proofs from these various blocks.
If we continue using these concepts (which have informed 30+ years of server side design), we can add a few nice features to our write transactions. The first of which is `Rollback` and `Commit`. That means all the changes we make in this transaction have no effect on the database until they are committed. And until they are committed, we can always abort if we detect an anomaly, returning to the last committed state with a rollback.
There is also a nice extension to this available on some database servers, basically, "nested" transactions or "savepoints". This means that within one transaction, you can open a subtransaction/savepoint and continue work. Later you have the option to commit or rollback all work since the savepoint/subtransaction. And then continue with the main transaction.
If you don't understand why this is useful, look at how basecoin needs to [hold cached state for AppTx](https://github.com/tendermint/basecoin/blob/master/state/execution.go#L126-L149), meaning that it rolls back all modifications if the AppTx returns an error. This was implemented as a wrapper in basecoin, but it is a reasonable thing to support in the DB interface itself (especially since the implementation becomes quite non-trivial as soon as you support range queries).
To give a bit more reference to this concept in practice, read about [Savepoints in Postgresql](https://www.postgresql.org/docs/current/static/tutorial-transactions.html) ([reference](https://www.postgresql.org/docs/current/static/sql-savepoint.html)) or [Nesting transactions in SQL Server](http://dba-presents.com/index.php/databases/sql-server/43-nesting-transactions-and-save-transaction-command) (TL;DR: scroll to the bottom, section "Real nesting transactions with SAVE TRANSACTION")
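As a rough sketch of how the savepoint idea would map onto the `Transaction` interface below (the `deliverAppTx` callback is hypothetical, standing in for basecoin-style AppTx handling):
```
// Run one AppTx inside a subtransaction so a failure rolls back only its own writes.
func runAppTx(tx Transaction, deliverAppTx func(Transaction) error) error {
	sub := tx.Subtransaction() // like a savepoint: the parent is locked while this is open
	if err := deliverAppTx(sub); err != nil {
		sub.Rollback() // discard everything the AppTx wrote; the parent is untouched
		return err
	}
	return sub.Commit() // fold the AppTx's writes back into the parent transaction
}
```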
### Functionality
Merkle trees work with key-value pairs, so we should most importantly focus on the basic Key-Value operations. That is `Get`, `Set`, and `Remove`. We also need to return a merkle proof for any key, along with a root hash of the tree for committing state to the blockchain. This is just the basic merkle-tree stuff.
If it is possible with the implementation, it is nice to provide access to Range Queries. That is, return all values where the key is between X and Y. If you construct your keys wisely, it is possible to store list (1:N) relations this way. Eg. if we store blog posts with keys like blog:`poster_id`:`sequence`, then I could search for all blog posts by a given `poster_id`, or even return just posts 10-19 from the given poster.
The construction of a tree that supports range queries was one of the [design decisions of go-merkle](https://github.com/tendermint/go-merkle/blob/master/README.md). It is also kind of possible with [ethereum's patricia trie](https://github.com/ethereum/wiki/wiki/Patricia-Tree) as long as the key is less than 32 bytes.
In addition to range queries, there is one more nice feature that we could add to our data store - listening to events. Depending on your context, this is "reactive programming", "event emitters", "notifications", etc... But the basic concept is that a client can listen for all changes to a given key (or set of keys), and receive a notification when this happens. This is very important to avoid [repeated polling and wasted queries](http://resthooks.org/) when a client simply wants to [detect changes](https://www.rethinkdb.com/blog/realtime-web/).
If the database provides access to some "listener" functionality, the app can choose to expose this to the external client via websockets, web hooks, http2 push events, android push notifications, etc, etc etc.... But if we want to support modern client functionality, let's add support for this reactive paradigm in our DB interface.
**TODO** support for more advanced backends, eg. Bolt....
### Go Interface
I will start with a simple Go interface to illustrate the in-process interface. Once there is agreement on how this looks, we can work out the gRPC bindings to support calling out of process. These interfaces are not finalized code, but I think they demonstrate the concepts better than text and provide a strawman to get feedback.
```
// DB represents the committed state of a merkle-ized key-value store
type DB interface {
    // Snapshot returns a reference to the last committed state for
    // providing proofs. You must Close it at the end so we can garbage
    // collect the historical state we hold on to for making these proofs.
    Snapshot() Prover

    // Begin starts a transaction - the only way to change state.
    // This will return an error if there is already an open Transaction.
    Begin() (Transaction, error)

    // These callbacks are triggered when the Transaction is Committed
    // to the DB. They can be used to eg. notify clients via websockets when
    // their account balance changes.
    AddListener(key []byte, listener Listener)
    RemoveListener(listener Listener)
}

// DBReader represents a read-only connection to a snapshot of the db
type DBReader interface {
    // Queries on my local view
    Has(key []byte) (bool, error)
    Get(key []byte) (Model, error)
    GetRange(start, end []byte, ascending bool, limit int) ([]Model, error)
    Closer
}

// Prover is an interface that lets one query for Proofs, holding the
// data at a specific location in memory
type Prover interface {
    DBReader

    // Hash is the AppHash (RootHash) for this block
    Hash() (hash []byte)

    // Prove returns the data along with a merkle Proof.
    // Model and Proof are nil if not found.
    Prove(key []byte) (Model, Proof, error)
}

// Transaction is a set of state changes to the DB to be applied atomically.
// There can only be one open transaction at a time, which may only have
// at most one subtransaction at a time.
// In short, at any time, there is exactly one object that can write to the
// DB, and we can use Subtransactions to group operations and roll them back
// together (kind of like `types.KVCache` from basecoin)
type Transaction interface {
    DBReader

    // Set and Remove change the state - they return an error immediately
    // if this Transaction is not holding the exclusive write lock.
    Set(model Model) (err error)
    Remove(key []byte) (removed bool, err error)

    // Subtransaction starts a new subtransaction; rolling it back will not
    // affect the parent. Only on Commit are the changes applied to this
    // transaction. While the subtransaction exists, no writes are allowed
    // on the parent. (You must Commit or Rollback the child to continue.)
    Subtransaction() Transaction

    // Commit applies this transaction (or subtransaction), so the parent
    // reference is now updated.
    // This only updates the persistent store if the top-level transaction
    // commits. (You may have any number of nested subtransactions.)
    Commit() error

    // Rollback ends the transaction and throws away all transaction-local
    // state, allowing the tree to prune those elements.
    // The parent transaction now recovers the write lock.
    Rollback()
}

// Listener registers callbacks on changes to the data store
type Listener interface {
    OnSet(key, value, oldValue []byte)
    OnRemove(key, oldValue []byte)
}

// Proof represents a merkle proof for a key
type Proof interface {
    RootHash() []byte
    Verify(key, value, root []byte) bool
}

type Model interface {
    Key() []byte
    Value() []byte
}

// Closer releases the reference to this state, allowing us to garbage collect.
// Make sure to call it before discarding.
type Closer interface {
    Close()
}
```
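To show how these pieces are meant to fit together, here is a hypothetical usage sketch (error handling abbreviated; everything outside the interfaces above is made up): an AppTx-style flow where a subtransaction lets us roll back a single message without aborting the whole block.

```
// applyBlock applies a batch of changes atomically and returns the new AppHash.
func applyBlock(db DB, changes []Model) ([]byte, error) {
    tx, err := db.Begin()
    if err != nil {
        return nil, err
    }
    for _, m := range changes {
        sub := tx.Subtransaction()
        if err := sub.Set(m); err != nil {
            sub.Rollback() // discard only this message's changes
            continue
        }
        if err := sub.Commit(); err != nil { // fold changes into the parent
            return nil, err
        }
    }
    if err := tx.Commit(); err != nil { // atomically persist the whole block
        return nil, err
    }
    snap := db.Snapshot()
    defer snap.Close()
    return snap.Hash(), nil // AppHash for the consensus engine
}
```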
### Remote Interface
The use case of allowing out-of-process calls is very powerful, and not just for providing a powerful merkle-ready data store to non-Go applications.
If we allow the ABCi app to maintain the only writable connections, we can guarantee that all transactions are only processed through the tendermint consensus engine. We could then allow multiple "web server" machines "read-only" access and scale out the database reads, assuming the consensus engine, ABCi logic, and public key cryptography are more of a bottleneck than the database. We could even place the consensus engine, ABCi app, and data store on one machine, connected with unix sockets for security, and expose a tcp/ssl interface for reading the data, to scale out query processing over multiple machines.
Returning our focus to the ABCi app (which is the most important use case): an app may well want to maintain 100 or 1000 snapshots of different heights to allow people to easily query many proofs at a given height without race conditions (very important for IBC, ask Jae). Thus, we should not require a separate TCP connection for each height, as this gets quite awkward with so many connections. Also, if we want to use gRPC, we should consider the connections potentially transient (although they are more efficient with keep-alive).
Thus, the wire representation of a transaction or a snapshot should simply be a unique id. All methods on a `Prover` or `Transaction` over the wire can send this id along with the arguments for the method call, and we just need a hash map on the server to map this id to a state.
The only negative of not requiring a persistent tcp connection for each snapshot is that there is no auto-detection if the client crashes without explicitly closing the connections. Thus, I would suggest adding a `Ping` thread in the gRPC interface which keeps the Snapshot alive. If no ping is received within a server-defined time, it may automatically close those transactions. A client with 500 snapshots that needs to ping each one every 10 seconds would incur a lot of overhead, so we should design the ping to accept a list of IDs for the client and update them all, or associate all snapshots with a clientID and just send the clientID in the ping. (Please add other ideas on how to detect client crashes without persistent connections.)
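As a strawman for the server-side bookkeeping this implies (all names here are hypothetical, not part of the proposal), snapshots could be handed out by id, and a ping carrying a clientID could refresh every snapshot owned by that client so crashed clients are eventually reaped:

```
import (
    "sync"
    "time"
)

type snapshotServer struct {
    mtx       sync.Mutex
    nextID    uint64
    snapshots map[uint64]Prover    // wire id -> open snapshot
    owner     map[uint64]string    // wire id -> clientID
    lastPing  map[string]time.Time // clientID -> last keep-alive
}

// Open registers a snapshot for a client and returns its wire id.
func (s *snapshotServer) Open(clientID string, p Prover) uint64 {
    s.mtx.Lock()
    defer s.mtx.Unlock()
    s.nextID++
    s.snapshots[s.nextID] = p
    s.owner[s.nextID] = clientID
    s.lastPing[clientID] = time.Now()
    return s.nextID
}

// Ping refreshes every snapshot owned by this client.
func (s *snapshotServer) Ping(clientID string) {
    s.mtx.Lock()
    defer s.mtx.Unlock()
    s.lastPing[clientID] = time.Now()
}

// reap closes every snapshot whose owner has not pinged within maxAge.
func (s *snapshotServer) reap(maxAge time.Duration) {
    s.mtx.Lock()
    defer s.mtx.Unlock()
    for id, client := range s.owner {
        if time.Since(s.lastPing[client]) > maxAge {
            s.snapshots[id].Close()
            delete(s.snapshots, id)
            delete(s.owner, id)
        }
    }
}
```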
To encourage adoption, we should provide a nice client that uses this gRPC interface (like we do with ABCi). For Go, the client may have the exact same interface as the in-process version, except that calls may also return network errors, not just illegal operations. We should also add a client with a clean API for Java, since that seems to be popular among app developers in the current tendermint community, and other bindings as we see the need in the server space.
### Persistence
Any data store worth its name should not lose all data on a crash. Even [redis provides some persistence](https://redis.io/topics/persistence) these days. Ideally, if the system crashes and restarts, it should have the data at the last block N that was committed. If the system crashes during the commit of block N+1, then the recovered state should be either block N or the completely committed block N+1, but no partial state between the two. Basically, the commit must be an atomic operation (even if updating hundreds of records).
To avoid a lot of headaches ourselves, we can use an existing data store, such as leveldb, which provides `WriteBatch` to group all operations.
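For example, a commit built on goleveldb could flush one block's worth of changes as a single synced batch, roughly like this sketch (not the final implementation; the function and its parameters are illustrative):

```
import (
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

// commitBlock writes one block's changes as a single atomic batch, so a crash
// leaves us at block N or block N+1, never in between.
func commitBlock(db *leveldb.DB, sets map[string][]byte, deletes [][]byte) error {
    batch := new(leveldb.Batch)
    for k, v := range sets {
        batch.Put([]byte(k), v)
    }
    for _, k := range deletes {
        batch.Delete(k)
    }
    // Sync forces the batch to disk before returning, making the commit durable.
    return db.Write(batch, &opt.WriteOptions{Sync: true})
}
```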
The other issue is cleaning up old state. We cannot delete any information from our persistent store as long as any snapshot holds a reference to it (or else we get panics when the data we query is not there). So, we need to store the outstanding deletions and perform them when the snapshot is `Close`d. In addition, we must consider the case that the data store crashes with open snapshots, so the info on outstanding deletions must also be persisted somewhere. Something like a "delete-behind log" (the opposite of a "write-ahead log").
This is not a concern of the generic interface, but each implementation should take care to handle this well to avoid accumulation of unused references in the data store and eventual data bloat.
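A toy sketch of such a delete-behind log follows (in-memory only for brevity; a real implementation would persist the pending deletions as discussed above, and all names are hypothetical):

```
// deleteBehindLog records deletions under the version that made them obsolete;
// they are only applied once no open snapshot still references that version.
type deleteBehindLog struct {
    pending map[int64][][]byte // version -> keys safe to delete once unreferenced
}

func (l *deleteBehindLog) Record(version int64, key []byte) {
    l.pending[version] = append(l.pending[version], key)
}

// Prune is called when the oldest open snapshot advances past `oldest`;
// everything recorded for earlier versions can now really be deleted.
func (l *deleteBehindLog) Prune(oldest int64, deleteFn func(key []byte) error) error {
    for v, keys := range l.pending {
        if v >= oldest {
            continue
        }
        for _, k := range keys {
            if err := deleteFn(k); err != nil {
                return err
            }
        }
        delete(l.pending, v)
    }
    return nil
}
```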
#### Backing stores
It is way outside the scope of this project to build our own database that is capable of efficiently storing the data, providing multiple read-only snapshots at once, and saving it atomically. The best approach seems to be to select an existing database (ideally a simple one) that provides this functionality and build upon it, much like the current `go-merkle` implementation builds upon `leveldb`. After some research, here are the winners and losers:
**Winners**
* Leveldb - [provides consistent snapshots](https://ayende.com/blog/161705/reviewing-leveldb-part-xiii-smile-and-here-is-your-snapshot), and [provides tooling for building ACID compliance](http://codeofrob.com/entries/writing-a-transaction-manager-on-top-of-leveldb.html)
* Note there are at least two solid implementations available in Go - [goleveldb](https://github.com/syndtr/goleveldb), a pure Go implementation, and [levigo](https://github.com/jmhodges/levigo), a Go wrapper around leveldb.
* Goleveldb is much easier to compile and cross-compile (not requiring cgo), while levigo (or cleveldb) seems to provide a significant performance boost (but I had trouble even running benchmarks)
* PostgreSQL - fully supports these ACID semantics if you call `SET TRANSACTION ISOLATION LEVEL SERIALIZABLE` at the beginning of a transaction (tested)
* This may be total overkill unless we also want to make use of other features, like storing data in multiple columns with secondary indexes.
* Trillian can show an example of [how to store a merkle tree in sql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go)
**Losers**
* Bolt - open [read-only snapshots can block writing](https://github.com/boltdb/bolt/issues/378)
* Mongo - [barely even supports atomic operations](https://docs.mongodb.com/manual/core/write-operations-atomicity/), much less multiple snapshots
**To investigate**
* [Trillian](https://github.com/google/trillian) - has a [persistent merkle tree interface](https://github.com/google/trillian/blob/master/storage/tree_storage.go) along with [backend storage with mysql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go), good inspiration for our design if not directly using it
* [Moss](https://github.com/couchbase/moss) - another key-value store in go, seems similar to leveldb, maybe compare with performance tests?
### Security
When allowing access out-of-process, we should provide different mechanisms to secure it. The first is the choice of binding to a local unix socket or a tcp port. The second is the optional use of ssl to encrypt the connection (very important over tcp). The third is authentication to control access to the database.
We may also want to consider the case of two server connections with different permissions, eg. a local unix socket that allows write access without further credentials, and a public TCP connection with ssl and authentication that only provides read-only access.
The use of ssl is quite easy in Go; we just need to generate and sign a certificate. It is nice to be able to disable it for dev machines, but it is very important for production.
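For illustration, enabling TLS on the read-only listener could look roughly like the sketch below (paths and the toggle flag are placeholders, not part of the proposal):

```
import (
    "crypto/tls"
    "net"
)

// listen opens a plain or TLS listener; dev machines can pass useTLS=false.
func listen(addr, certFile, keyFile string, useTLS bool) (net.Listener, error) {
    if !useTLS {
        return net.Listen("tcp", addr)
    }
    cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    if err != nil {
        return nil, err
    }
    return tls.Listen("tcp", addr, &tls.Config{Certificates: []tls.Certificate{cert}})
}
```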
For authentication, let me sketch out a minimal solution. The server could just have a simple config file with key/bcrypt(password) pairs along with read/write permission level, and read that upon startup. The client must provide a username and password in the HTTP headers when making the original HTTPS gRPC connection.
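A minimal sketch of that check, assuming a config of username/bcrypt(password)/permission entries loaded at startup (the types and helper below are hypothetical):

```
import (
    "errors"

    "golang.org/x/crypto/bcrypt"
)

type account struct {
    hash []byte // bcrypt(password) from the config file
    perm string // "read" or "write"
}

// authorize verifies the supplied password and required permission level.
func authorize(accounts map[string]account, user, password, needPerm string) error {
    acct, ok := accounts[user]
    if !ok {
        return errors.New("unknown user")
    }
    if err := bcrypt.CompareHashAndPassword(acct.hash, []byte(password)); err != nil {
        return errors.New("bad password")
    }
    if needPerm == "write" && acct.perm != "write" {
        return errors.New("read-only credentials")
    }
    return nil
}
```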
This is super minimal to provide some protection. Things like LDAP, OAuth and single-sign on seem overkill and even potential security holes. Maybe there is another solution somewhere in the middle.

+ 17
- 0
docs/architecture/merkle.md View File

@ -0,0 +1,17 @@
# Merkle data stores
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an apphash to the consensus engine, as well as a full proof to any client.
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCi. It uses `go-db` bindings internally to persist data to leveldb.
What are some of the requirements of this store?
* It must support efficient key-value operations (get/set/delete)
* It must support persistence.
* We must only persist complete blocks, so when we come up after a crash we are at the state of block N or N+1, but not in-between these two states.
* It must allow us to read/write from one uncommitted state (working state), while serving other queries from the last committed state, and provide a way to determine which one to serve for each client.
* It must allow us to hold references to old state, to allow providing proofs from 20 blocks ago. We can define some limits as to the maximum time to hold this data.
* We provide an in-process binding in Go.
* We provide language-agnostic bindings when running the data store as its own process.

+ 42
- 27
glide.lock View File

@ -1,18 +1,22 @@
hash: dcaf3fb1290b0d7942c86f0644a7431ac313247936eab9515b1ade9ffe579848
updated: 2017-01-13T00:30:55.237750829-05:00
hash: 41f8fec708e98b7f8c4804be46008493199fa45e89b2d5dc237fd65fe431c62f
updated: 2017-03-06T04:01:33.319604992-05:00
imports:
- name: github.com/btcsuite/btcd
version: 153dca5c1e4b5d1ea1523592495e5bedfa503391
version: d06c0bb181529331be8f8d9350288c420d9e60e4
subpackages:
- btcec
- name: github.com/BurntSushi/toml
version: 99064174e013895bbd9b025c31100bd1d9b590ca
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/ebuchman/fail-test
version: c1eddaa09da2b4017351245b0d43234955276798
version: 13f91f14c826314205cdbed1ec8ac8bf08e03381
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
- name: github.com/gogo/protobuf
version: f9114dace7bd920b32f943b3c73fafbcbab2bf31
version: 909568be09de550ed094403c2bf8a261b5bb730a
subpackages:
- proto
- name: github.com/golang/protobuf
@ -22,15 +26,26 @@ imports:
- name: github.com/golang/snappy
version: d9eb7a3d35ec988b8585d4a0068e462c27d28380
- name: github.com/gorilla/websocket
version: 17634340a83afe0cab595e40fbc63f6ffa1d8915
version: 3ab3a8b8831546bd18fd182c20687ca853b2bb13
- name: github.com/jmhodges/levigo
version: c42d9e0ca023e2198120196f842701bb4c55d7b9
- name: github.com/mattn/go-colorable
version: d228849504861217f796da67fae4f6e347643f15
- name: github.com/mattn/go-isatty
version: 30a891c33c7cde7b02a981314b4228ec99380cca
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/spf13/pflag
version: 25f8b5b07aece3207895bf19f7ab517eb3b22a40
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert
- require
- name: github.com/syndtr/goleveldb
version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65
subpackages:
@ -47,12 +62,11 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 699d45bc678865b004b90213bf88a950f420973b
version: 1236e8fb6eee3a63909f4014a8e84385ead7933d
subpackages:
- client
- example/counter
- example/dummy
- example/nil
- server
- types
- name: github.com/tendermint/ed25519
@ -61,21 +75,23 @@ imports:
- edwards25519
- extra25519
- name: github.com/tendermint/go-autofile
version: 0416e0aa9c68205aa44844096f9f151ada9d0405
version: 48b17de82914e1ec2f134ce823ba426337d2c518
- name: github.com/tendermint/go-clist
version: 3baa390bbaf7634251c42ad69a8682e7e3990552
- name: github.com/tendermint/go-common
version: e289af53b6bf6af28da129d9ef64389a4cf7987f
version: dcb015dff6c7af21e65c8e2f3b450df19d38c777
subpackages:
- test
- name: github.com/tendermint/go-config
version: e64b424499acd0eb9856b88e10c0dff41628c0d6
version: 620dcbbd7d587cf3599dedbf329b64311b0c307a
- name: github.com/tendermint/go-crypto
version: 4b11d62bdb324027ea01554e5767b71174680ba0
version: 3f47cfac5fcd9e0f1727c7db980b3559913b3e3a
- name: github.com/tendermint/go-data
version: 32271140e8fd5abdbb22e268d7a02421fa382f0b
- name: github.com/tendermint/go-db
version: 72f6dacd22a686cdf7fcd60286503e3aceda77ba
version: eac3f2bc147023957c8bf69432a4e6c4dc5c3f72
- name: github.com/tendermint/go-events
version: fddee66d90305fccb6f6d84d16c34fa65ea5b7f6
version: f8ffbfb2be3483e9e7927495590a727f51c0c11f
- name: github.com/tendermint/go-flowrate
version: a20c98e61957faa93b4014fbd902f20ab9317a6a
subpackages:
@ -83,9 +99,9 @@ imports:
- name: github.com/tendermint/go-logger
version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
- name: github.com/tendermint/go-merkle
version: 7a86b4486f2cd84ac885c5bbc609fdee2905f5d1
version: 714d4d04557fd068a7c2a1748241ce8428015a96
- name: github.com/tendermint/go-p2p
version: 3d98f675f30dc4796546b8b890f895926152fa8d
version: 97a5ed2d1a17eaee8717b8a32cfaf7a9a82a273d
subpackages:
- upnp
- name: github.com/tendermint/go-rpc
@ -95,11 +111,17 @@ imports:
- server
- types
- name: github.com/tendermint/go-wire
version: 2f3b7aafe21c80b19b6ee3210ecb3e3d07c7a471
version: f530b7af7a8b06e612c2063bff6ace49060a085e
- name: github.com/tendermint/log15
version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
subpackages:
- term
- name: github.com/tendermint/merkleeyes
version: 9fb76efa5aebe773a598f97e68e75fe53d520e70
subpackages:
- app
- client
- testutil
- name: golang.org/x/crypto
version: 7c6cc321c680f03b9ef0764448e780704f486b51
subpackages:
@ -112,7 +134,7 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: 60c41d1de8da134c05b7b40154a9a82bf5b7edb9
version: 61557ac0112b576429a0df080e1c2cef5dfbb642
subpackages:
- context
- http2
@ -125,15 +147,8 @@ imports:
version: d75a52659825e75fff6158388dddc6a5b04f9ba5
subpackages:
- unix
- name: golang.org/x/text
version: 44f4f658a783b0cee41fe0a23b8fc91d9c120558
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/grpc
version: 50955793b0183f9de69bd78e2ec251cf20aab121
version: cbcceb2942a489498cf22b2f918536e819d33f0a
subpackages:
- codes
- credentials


+ 34
- 10
glide.yaml View File

@ -1,29 +1,53 @@
package: github.com/tendermint/tendermint
import:
- package: github.com/gogo/protobuf
subpackages:
- proto
- package: github.com/gorilla/websocket
- package: github.com/spf13/pflag
- package: github.com/tendermint/ed25519
- package: github.com/tendermint/go-flowrate
- package: github.com/tendermint/go-autofile
version: develop
- package: github.com/tendermint/go-clist
version: develop
- package: github.com/tendermint/go-common
version: develop
- package: github.com/tendermint/go-config
version: develop
- package: github.com/tendermint/go-crypto
version: develop
- package: github.com/tendermint/go-db
version: develop
- package: github.com/tendermint/go-events
version: develop
- package: github.com/tendermint/go-logger
version: develop
- package: github.com/tendermint/go-merkle
version: develop
- package: github.com/tendermint/go-p2p
version: develop
- package: github.com/tendermint/go-rpc
version: develop
- package: github.com/tendermint/go-wire
- package: github.com/tendermint/log15
version: develop
- package: github.com/tendermint/abci
version: develop
- package: github.com/tendermint/go-flowrate
- package: github.com/tendermint/log15
- package: github.com/tendermint/ed25519
- package: github.com/tendermint/merkleeyes
version: develop
subpackages:
- app
- package: github.com/gogo/protobuf
version: ^0.3
subpackages:
- proto
- package: github.com/gorilla/websocket
version: ^1.1.0
- package: github.com/spf13/pflag
- package: github.com/pkg/errors
version: ^0.8.0
- package: golang.org/x/crypto
subpackages:
- ripemd160
- package: github.com/tendermint/go-flowrate
testImport:
- package: github.com/stretchr/testify
version: ^1.1.4
subpackages:
- flowrate
- assert
- require

+ 4
- 4
mempool/mempool.go View File

@ -7,13 +7,13 @@ import (
"sync/atomic"
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
/*
@ -249,7 +249,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
// Get the valid transactions remaining
// If maxTxs is -1, there is no cap on returned transactions.
func (mem *Mempool) Reap(maxTxs int) []types.Tx {
func (mem *Mempool) Reap(maxTxs int) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
@ -263,7 +263,7 @@ func (mem *Mempool) Reap(maxTxs int) []types.Tx {
}
// maxTxs: -1 means uncapped, 0 means none
func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
if maxTxs == 0 {
return []types.Tx{}
} else if maxTxs < 0 {
@ -281,7 +281,7 @@ func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
// Mempool will discard these txs.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
func (mem *Mempool) Update(height int, txs []types.Tx) {
func (mem *Mempool) Update(height int, txs types.Txs) {
// TODO: check err ?
mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx


+ 2
- 3
mempool/reactor.go View File

@ -6,13 +6,12 @@ import (
"reflect"
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
const (
@ -80,7 +79,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
}
// broadcasting happens from go routines per peer
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
log.Warn(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}


+ 131
- 175
node/node.go View File

@ -2,13 +2,13 @@ package node
import (
"bytes"
"io/ioutil"
"errors"
"net"
"net/http"
"strings"
"time"
. "github.com/tendermint/go-common"
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
@ -25,23 +25,32 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
import _ "net/http/pprof"
_ "net/http/pprof"
)
type Node struct {
config cfg.Config
sw *p2p.Switch
evsw types.EventSwitch
blockStore *bc.BlockStore
bcReactor *bc.BlockchainReactor
mempoolReactor *mempl.MempoolReactor
consensusState *consensus.ConsensusState
consensusReactor *consensus.ConsensusReactor
privValidator *types.PrivValidator
genesisDoc *types.GenesisDoc
privKey crypto.PrivKeyEd25519
proxyApp proxy.AppConns
cmn.BaseService
// config
config cfg.Config // user config
genesisDoc *types.GenesisDoc // initial validator set
privValidator *types.PrivValidator // local node's validator key
// network
privKey crypto.PrivKeyEd25519 // local node's p2p key
sw *p2p.Switch // p2p connections
addrBook *p2p.AddrBook // known peers
// services
evsw types.EventSwitch // pub/sub for services
blockStore *bc.BlockStore // store the blockchain to disk
bcReactor *bc.BlockchainReactor // for fast-syncing
mempoolReactor *mempl.MempoolReactor // for gossipping transactions
consensusState *consensus.ConsensusState // latest consensus state
consensusReactor *consensus.ConsensusReactor // for participating in the consensus
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
}
func NewNodeDefault(config cfg.Config) *Node {
@ -57,21 +66,23 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State db
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.GetState(config, stateDB)
// add the chainid and number of validators to the global config
config.Set("chain_id", state.ChainID)
config.Set("num_vals", state.Validators.Size())
// Create the proxyApp, which manages connections (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, clientCreator, sm.NewHandshaker(config, state, blockStore))
// and sync tendermint and the app by replaying any necessary blocks
proxyApp := proxy.NewAppConns(config, clientCreator, consensus.NewHandshaker(config, state, blockStore))
if _, err := proxyApp.Start(); err != nil {
Exit(Fmt("Error starting proxy app connections: %v", err))
cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
}
// add the chainid and number of validators to the global config
config.Set("chain_id", state.ChainID)
config.Set("num_vals", state.Validators.Size())
// reload the state (it may have been updated by the handshake)
state = sm.LoadState(stateDB)
// Generate node PrivKey
privKey := crypto.GenPrivKeyEd25519()
@ -80,7 +91,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
eventSwitch := types.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
Exit(Fmt("Failed to start switch: %v", err))
cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
}
// Decide whether to fast-sync or not
@ -114,32 +125,37 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
sw.AddReactor("CONSENSUS", consensusReactor)
// Optionally, start the pex reactor
// TODO: this is a dev feature, it needs some love
var addrBook *p2p.AddrBook
if config.GetBool("pex_reactor") {
addrBook := p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
addrBook.Start()
addrBook = p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
pexReactor := p2p.NewPEXReactor(addrBook)
sw.AddReactor("PEX", pexReactor)
}
// filter peers by addr or pubkey with a abci query.
// if the query return code is OK, add peer
// XXX: query format subject to change
// Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer.
// XXX: Query format subject to change
if config.GetBool("filter_peers") {
// NOTE: addr is ip:port
sw.SetAddrFilter(func(addr net.Addr) error {
res := proxyApp.Query().QuerySync([]byte(Fmt("p2p/filter/addr/%s", addr.String())))
if res.IsOK() {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())})
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
}
return res
return errors.New(resQuery.Code.String())
})
sw.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error {
res := proxyApp.Query().QuerySync([]byte(Fmt("p2p/filter/pubkey/%X", pubkey.Bytes())))
if res.IsOK() {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%X", pubkey.Bytes())})
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
}
return res
return errors.New(resQuery.Code.String())
})
}
@ -156,34 +172,84 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
}()
}
return &Node{
config: config,
sw: sw,
node := &Node{
config: config,
genesisDoc: state.GenesisDoc,
privValidator: privValidator,
privKey: privKey,
sw: sw,
addrBook: addrBook,
evsw: eventSwitch,
blockStore: blockStore,
bcReactor: bcReactor,
mempoolReactor: mempoolReactor,
consensusState: consensusState,
consensusReactor: consensusReactor,
privValidator: privValidator,
genesisDoc: state.GenesisDoc,
privKey: privKey,
proxyApp: proxyApp,
}
node.BaseService = *cmn.NewBaseService(log, "Node", node)
return node
}
// Call Start() after adding the listeners.
func (n *Node) Start() error {
func (n *Node) OnStart() error {
n.BaseService.OnStart()
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, n.config.GetBool("skip_upnp"))
n.sw.AddListener(l)
// Start the switch
n.sw.SetNodeInfo(makeNodeInfo(n.config, n.sw, n.privKey))
n.sw.SetNodePrivKey(n.privKey)
_, err := n.sw.Start()
return err
if err != nil {
return err
}
// If seeds exist, add them to the address book and dial out
if n.config.GetString("seeds") != "" {
// dial out
seeds := strings.Split(n.config.GetString("seeds"), ",")
if err := n.DialSeeds(seeds); err != nil {
return err
}
}
// Run the RPC server
if n.config.GetString("rpc_laddr") != "" {
listeners, err := n.startRPC()
if err != nil {
return err
}
n.rpcListeners = listeners
}
return nil
}
func (n *Node) Stop() {
func (n *Node) OnStop() {
n.BaseService.OnStop()
log.Notice("Stopping Node")
// TODO: gracefully disconnect from peers.
n.sw.Stop()
for _, l := range n.rpcListeners {
log.Info("Closing rpc listener", "listener", l)
if err := l.Close(); err != nil {
log.Error("Error closing listener", "listener", l, "error", err)
}
}
}
func (n *Node) RunForever() {
// Sleep forever and then...
cmn.TrapSignal(func() {
n.Stop()
})
}
// Add the event switch to reactors, mempool, etc.
@ -197,13 +263,13 @@ func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
// Add listeners before starting the Node.
// The first listener is the primary listener (in NodeInfo)
func (n *Node) AddListener(l p2p.Listener) {
log.Notice(Fmt("Added %v", l))
n.sw.AddListener(l)
}
func (n *Node) StartRPC() ([]net.Listener, error) {
// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
rpccore.SetConfig(n.config)
rpccore.SetEventSwitch(n.evsw)
rpccore.SetBlockStore(n.blockStore)
rpccore.SetConsensusState(n.consensusState)
@ -211,8 +277,12 @@ func (n *Node) StartRPC() ([]net.Listener, error) {
rpccore.SetSwitch(n.sw)
rpccore.SetPubKey(n.privValidator.PubKey)
rpccore.SetGenesisDoc(n.genesisDoc)
rpccore.SetAddrBook(n.addrBook)
rpccore.SetProxyAppQuery(n.proxyApp.Query())
}
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := strings.Split(n.config.GetString("rpc_laddr"), ",")
// we may expose the rpc over both a unix and tcp socket
@ -287,16 +357,16 @@ func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd255
Network: config.GetString("chain_id"),
Version: version.Version,
Other: []string{
Fmt("wire_version=%v", wire.Version),
Fmt("p2p_version=%v", p2p.Version),
Fmt("consensus_version=%v", consensus.Version),
Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
cmn.Fmt("wire_version=%v", wire.Version),
cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", consensus.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
},
}
// include git hash in the nodeInfo if available
if rev, err := ReadFile(config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, Fmt("revision=%v", string(rev)))
if rev, err := cmn.ReadFile(config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}
if !sw.IsListening() {
@ -311,133 +381,19 @@ func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd255
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
// except of course if the rpc is only bound to localhost
nodeInfo.ListenAddr = Fmt("%v:%v", p2pHost, p2pPort)
nodeInfo.Other = append(nodeInfo.Other, Fmt("rpc_addr=%v", rpcListenAddr))
nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
return nodeInfo
}
//------------------------------------------------------------------------------
// Users wishing to:
// * use an external signer for their validators
// * supply an in-proc abci app
// should fork tendermint/tendermint and implement RunNode to
// call NewNode with their custom priv validator and/or custom
// proxy.ClientCreator interface
func RunNode(config cfg.Config) {
// Wait until the genesis doc becomes available
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc := types.GenesisDocFromJSON(jsonBlob)
if genDoc.ChainID == "" {
PanicSanity(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Create & start node
n := NewNodeDefault(config)
protocol, address := ProtocolAndAddress(config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, config.GetBool("skip_upnp"))
n.AddListener(l)
err := n.Start()
if err != nil {
Exit(Fmt("Failed to start node: %v", err))
}
log.Notice("Started node", "nodeInfo", n.sw.NodeInfo())
// If seedNode is provided by config, dial out.
if config.GetString("seeds") != "" {
seeds := strings.Split(config.GetString("seeds"), ",")
n.sw.DialSeeds(seeds)
}
// Run the RPC server.
if config.GetString("rpc_laddr") != "" {
_, err := n.StartRPC()
if err != nil {
PanicCrisis(err)
}
}
// Sleep forever and then...
TrapSignal(func() {
n.Stop()
})
}
func (n *Node) NodeInfo() *p2p.NodeInfo {
return n.sw.NodeInfo()
}
func (n *Node) DialSeeds(seeds []string) {
n.sw.DialSeeds(seeds)
}
//------------------------------------------------------------------------------
// replay
// convenience for replay mode
func newConsensusState(config cfg.Config) *consensus.ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
// Create proxyAppConn connection (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), sm.NewHandshaker(config, state, blockStore))
_, err := proxyApp.Start()
if err != nil {
Exit(Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
}
mempool := mempl.NewMempool(config, proxyApp.Mempool())
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}
func RunReplayConsole(config cfg.Config, walFile string) {
consensusState := newConsensusState(config)
if err := consensusState.ReplayConsole(walFile); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
}
func RunReplay(config cfg.Config, walFile string) {
consensusState := newConsensusState(config)
if err := consensusState.ReplayMessages(walFile); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
log.Notice("Replay run successfully")
func (n *Node) DialSeeds(seeds []string) error {
return n.sw.DialSeeds(n.addrBook, seeds)
}
// Defaults to tcp


+ 4
- 4
node/node_test.go View File

@ -4,7 +4,6 @@ import (
"testing"
"time"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/config/tendermint_test"
)
@ -13,12 +12,13 @@ func TestNodeStartStop(t *testing.T) {
// Create & start node
n := NewNodeDefault(config)
protocol, address := ProtocolAndAddress(config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, config.GetBool("skip_upnp"))
n.AddListener(l)
n.Start()
log.Notice("Started node", "nodeInfo", n.sw.NodeInfo())
// Wait a bit to initialize
// TODO remove time.Sleep(), make asynchronous.
time.Sleep(time.Second * 2)
ch := make(chan struct{}, 1)
go func() {
n.Stop()


+ 4
- 4
proxy/app_conn.go View File

@ -34,8 +34,8 @@ type AppConnQuery interface {
Error() error
EchoSync(string) (res types.Result)
InfoSync() (types.ResponseInfo, error)
QuerySync(tx []byte) (res types.Result)
InfoSync() (resInfo types.ResponseInfo, err error)
QuerySync(reqQuery types.RequestQuery) (resQuery types.ResponseQuery, err error)
// SetOptionSync(key string, value string) (res types.Result)
}
@ -139,6 +139,6 @@ func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
}
func (app *appConnQuery) QuerySync(tx []byte) (res types.Result) {
return app.appConn.QuerySync(tx)
func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (types.ResponseQuery, error) {
return app.appConn.QuerySync(reqQuery)
}

+ 2
- 3
proxy/client.go View File

@ -4,11 +4,10 @@ import (
"fmt"
"sync"
cfg "github.com/tendermint/go-config"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
nilapp "github.com/tendermint/abci/example/nil"
"github.com/tendermint/abci/types"
cfg "github.com/tendermint/go-config"
)
// NewABCIClient returns newly connected client
@ -74,7 +73,7 @@ func DefaultClientCreator(config cfg.Config) ClientCreator {
case "persistent_dummy":
return NewLocalClientCreator(dummy.NewPersistentDummyApplication(config.GetString("db_dir")))
case "nilapp":
return NewLocalClientCreator(nilapp.NewNilApplication())
return NewLocalClientCreator(types.NewBaseApplication())
default:
mustConnect := false // loop retrying
return NewRemoteClientCreator(addr, transport, mustConnect)


+ 4
- 4
proxy/multi_app_conn.go View File

@ -1,7 +1,7 @@
package proxy
import (
. "github.com/tendermint/go-common"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
)
@ -9,7 +9,7 @@ import (
// Tendermint's interface to the application consists of multiple connections
type AppConns interface {
Service
cmn.Service
Mempool() AppConnMempool
Consensus() AppConnConsensus
@ -32,7 +32,7 @@ type Handshaker interface {
// which ensures the app and tendermint are synced.
// TODO: on app restart, clients must reboot together
type multiAppConn struct {
BaseService
cmn.BaseService
config cfg.Config
@ -52,7 +52,7 @@ func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, handshaker
handshaker: handshaker,
clientCreator: clientCreator,
}
multiAppConn.BaseService = *NewBaseService(log, "multiAppConn", multiAppConn)
multiAppConn.BaseService = *cmn.NewBaseService(log, "multiAppConn", multiAppConn)
return multiAppConn
}


+ 65
- 0
rpc/client/event_test.go View File

@ -0,0 +1,65 @@
package client_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
)
func TestHeaderEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start for this test it if it wasn't already running
if !c.IsRunning() {
// if so, then we start it, listen, and stop it.
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
evtTyp := types.EventStringNewBlockHeader()
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
_, ok := evt.(types.EventDataNewBlockHeader)
require.True(ok, "%d: %#v", i, evt)
// TODO: more checks...
}
}
func TestTxEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start for this test it if it wasn't already running
if !c.IsRunning() {
// if so, then we start it, listen, and stop it.
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
// make the tx
_, _, tx := merktest.MakeTxKV()
evtTyp := types.EventStringTx(types.Tx(tx))
// send async
txres, err := c.BroadcastTxAsync(tx)
require.Nil(err, "%+v", err)
require.True(txres.Code.IsOK())
// and wait for confirmation
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
// and make sure it has the proper info
txe, ok := evt.(types.EventDataTx)
require.True(ok, "%d: %#v", i, evt)
// make sure this is the proper tx
require.EqualValues(tx, txe.Tx)
require.True(txe.Code.IsOK())
}
}

+ 88
- 0
rpc/client/helpers.go View File

@ -0,0 +1,88 @@
package client
import (
"time"
"github.com/pkg/errors"
cmn "github.com/tendermint/go-common"
events "github.com/tendermint/go-events"
"github.com/tendermint/tendermint/types"
)
// Waiter is informed of current height, decided whether to quit early
type Waiter func(delta int) (abort error)
// DefaultWaitStrategy is the standard backoff algorithm,
// but you can plug in another one
func DefaultWaitStrategy(delta int) (abort error) {
if delta > 10 {
return errors.Errorf("Waiting for %d blocks... aborting", delta)
} else if delta > 0 {
// estimate of wait time....
// wait half a second for the next block (in progress)
// plus one second for every full block
delay := time.Duration(delta-1)*time.Second + 500*time.Millisecond
time.Sleep(delay)
}
return nil
}
// Wait for height will poll status at reasonable intervals until
// the block at the given height is available.
//
// If waiter is nil, we use DefaultWaitStrategy, but you can also
// provide your own implementation
func WaitForHeight(c StatusClient, h int, waiter Waiter) error {
if waiter == nil {
waiter = DefaultWaitStrategy
}
delta := 1
for delta > 0 {
s, err := c.Status()
if err != nil {
return err
}
delta = h - s.LatestBlockHeight
// wait for the time, or abort early
if err := waiter(delta); err != nil {
return err
}
}
return nil
}
// WaitForOneEvent subscribes to a websocket event for the given
// event time and returns upon receiving it one time, or
// when the timeout duration has expired.
//
// This handles subscribing and unsubscribing under the hood
func WaitForOneEvent(evsw types.EventSwitch,
evtTyp string, timeout time.Duration) (types.TMEventData, error) {
listener := cmn.RandStr(12)
evts, quit := make(chan events.EventData, 10), make(chan bool, 1)
// start timeout count-down
go func() {
time.Sleep(timeout)
quit <- true
}()
// register for the next event of this type
evsw.AddListenerForEvent(listener, evtTyp, func(data events.EventData) {
evts <- data
})
// make sure to unregister after the test is over
defer evsw.RemoveListenerForEvent(evtTyp, listener)
// defer evsw.RemoveListener(listener) // this also works
select {
case <-quit:
return nil, errors.New("timed out waiting for event")
case evt := <-evts:
tmevt, ok := evt.(types.TMEventData)
if ok {
return tmevt, nil
}
return nil, errors.Errorf("Got unexpected event type: %#v", evt)
}
}

+ 76
- 0
rpc/client/helpers_test.go View File

@ -0,0 +1,76 @@
package client_test
import (
"errors"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/client/mock"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
func TestWaitForHeight(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// test with error result - immediate failure
m := &mock.StatusMock{
Call: mock.Call{
Error: errors.New("bye"),
},
}
r := mock.NewStatusRecorder(m)
// connection failure always leads to error
err := client.WaitForHeight(r, 8, nil)
require.NotNil(err)
require.Equal("bye", err.Error())
// we called status once to check
require.Equal(1, len(r.Calls))
// now set current block height to 10
m.Call = mock.Call{
Response: &ctypes.ResultStatus{LatestBlockHeight: 10},
}
// we will not wait for more than 10 blocks
err = client.WaitForHeight(r, 40, nil)
require.NotNil(err)
require.True(strings.Contains(err.Error(), "aborting"))
// we called status once more to check
require.Equal(2, len(r.Calls))
// waiting for the past returns immediately
err = client.WaitForHeight(r, 5, nil)
require.Nil(err)
// we called status once more to check
require.Equal(3, len(r.Calls))
// since we can't update in a background goroutine (test --race)
// we use the callback to update the status height
myWaiter := func(delta int) error {
// update the height for the next call
m.Call.Response = &ctypes.ResultStatus{LatestBlockHeight: 15}
return client.DefaultWaitStrategy(delta)
}
// we wait for a few blocks
err = client.WaitForHeight(r, 12, myWaiter)
require.Nil(err)
// we called status once to check
require.Equal(5, len(r.Calls))
pre := r.Calls[3]
require.Nil(pre.Error)
prer, ok := pre.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.Equal(10, prer.LatestBlockHeight)
post := r.Calls[4]
require.Nil(post.Error)
postr, ok := post.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.Equal(15, postr.LatestBlockHeight)
}

+ 349
- 0
rpc/client/httpclient.go View File

@ -0,0 +1,349 @@
package client
import (
"fmt"
"github.com/pkg/errors"
events "github.com/tendermint/go-events"
"github.com/tendermint/go-rpc/client"
wire "github.com/tendermint/go-wire"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
/*
HTTP is a Client implementation that communicates
with a tendermint node over json rpc and websockets.
This is the main implementation you probably want to use in
production code. There are other implementations when calling
the tendermint node in-process (local), or when you want to mock
out the server for test code (mock).
*/
type HTTP struct {
remote string
rpc *rpcclient.ClientJSONRPC
*WSEvents
}
// New takes a remote endpoint in the form tcp://<host>:<port>
// and the websocket path (which always seems to be "/websocket")
func NewHTTP(remote, wsEndpoint string) *HTTP {
return &HTTP{
rpc: rpcclient.NewClientJSONRPC(remote),
remote: remote,
WSEvents: newWSEvents(remote, wsEndpoint),
}
}
func (c *HTTP) _assertIsClient() Client {
return c
}
func (c *HTTP) _assertIsNetworkClient() NetworkClient {
return c
}
func (c *HTTP) _assertIsEventSwitch() types.EventSwitch {
return c
}
func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("status", []interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Status")
}
// note: panics if rpc doesn't match. okay???
return (*tmResult).(*ctypes.ResultStatus), nil
}
func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_info", []interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIInfo")
}
return (*tmResult).(*ctypes.ResultABCIInfo), nil
}
func (c *HTTP) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_query", []interface{}{path, data, prove}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIQuery")
}
return (*tmResult).(*ctypes.ResultABCIQuery), nil
}
func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "broadcast_tx_commit")
}
return (*tmResult).(*ctypes.ResultBroadcastTxCommit), nil
}
func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_async", tx)
}
func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_sync", tx)
}
func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call(route, []interface{}{tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, route)
}
return (*tmResult).(*ctypes.ResultBroadcastTx), nil
}
func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("net_info", nil, tmResult)
if err != nil {
return nil, errors.Wrap(err, "NetInfo")
}
return (*tmResult).(*ctypes.ResultNetInfo), nil
}
func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("dump_consensus_state", nil, tmResult)
if err != nil {
return nil, errors.Wrap(err, "DumpConsensusState")
}
return (*tmResult).(*ctypes.ResultDumpConsensusState), nil
}
func (c *HTTP) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("blockchain", []interface{}{minHeight, maxHeight}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "BlockchainInfo")
}
return (*tmResult).(*ctypes.ResultBlockchainInfo), nil
}
func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("genesis", nil, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Genesis")
}
return (*tmResult).(*ctypes.ResultGenesis), nil
}
func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("block", []interface{}{height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Block")
}
return (*tmResult).(*ctypes.ResultBlock), nil
}
func (c *HTTP) Commit(height int) (*ctypes.ResultCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("commit", []interface{}{height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Commit")
}
return (*tmResult).(*ctypes.ResultCommit), nil
}
func (c *HTTP) Validators() (*ctypes.ResultValidators, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("validators", nil, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Validators")
}
return (*tmResult).(*ctypes.ResultValidators), nil
}
/** websocket event stuff here... **/
type WSEvents struct {
types.EventSwitch
remote string
endpoint string
ws *rpcclient.WSClient
// used for signaling the goroutine that feeds ws -> EventSwitch
quit chan bool
done chan bool
// used to maintain counts of actively listened events
// so we can properly subscribe/unsubscribe
// FIXME: thread-safety???
// FIXME: reuse code from go-events???
evtCount map[string]int // count how many time each event is subscribed
listeners map[string][]string // keep track of which events each listener is listening to
}
func newWSEvents(remote, endpoint string) *WSEvents {
return &WSEvents{
EventSwitch: types.NewEventSwitch(),
endpoint: endpoint,
remote: remote,
quit: make(chan bool, 1),
done: make(chan bool, 1),
evtCount: map[string]int{},
listeners: map[string][]string{},
}
}
func (w *WSEvents) _assertIsEventSwitch() types.EventSwitch {
return w
}
// Start is the only way I could think the extend OnStart from
// events.eventSwitch. If only it wasn't private...
// BaseService.Start -> eventSwitch.OnStart -> WSEvents.Start
func (w *WSEvents) Start() (bool, error) {
st, err := w.EventSwitch.Start()
// if we did start, then OnStart here...
if st && err == nil {
ws := rpcclient.NewWSClient(w.remote, w.endpoint)
_, err = ws.Start()
if err == nil {
w.ws = ws
go w.eventListener()
}
}
return st, errors.Wrap(err, "StartWSEvent")
}
// Stop wraps the BaseService/eventSwitch actions as Start does
func (w *WSEvents) Stop() bool {
stop := w.EventSwitch.Stop()
if stop {
// send a message to quit to stop the eventListener
w.quit <- true
<-w.done
w.ws.Stop()
w.ws = nil
}
return stop
}
/** TODO: more intelligent subscriptions! **/
func (w *WSEvents) AddListenerForEvent(listenerID, event string, cb events.EventCallback) {
// no one listening -> subscribe
if w.evtCount[event] == 0 {
w.subscribe(event)
}
// if this listener was already listening to this event, return early
for _, s := range w.listeners[listenerID] {
if event == s {
return
}
}
// otherwise, add this event to this listener
w.evtCount[event] += 1
w.listeners[listenerID] = append(w.listeners[listenerID], event)
w.EventSwitch.AddListenerForEvent(listenerID, event, cb)
}
func (w *WSEvents) RemoveListenerForEvent(event string, listenerID string) {
// if this listener is listening already, splice it out
found := false
l := w.listeners[listenerID]
for i, s := range l {
if event == s {
found = true
w.listeners[listenerID] = append(l[:i], l[i+1:]...)
break
}
}
// if the listener wasn't already listening to the event, exit early
if !found {
return
}
// now we can update the subscriptions
w.evtCount[event] -= 1
if w.evtCount[event] == 0 {
w.unsubscribe(event)
}
w.EventSwitch.RemoveListenerForEvent(event, listenerID)
}
func (w *WSEvents) RemoveListener(listenerID string) {
// remove all counts for this listener
for _, s := range w.listeners[listenerID] {
w.evtCount[s] -= 1
if w.evtCount[s] == 0 {
w.unsubscribe(s)
}
}
w.listeners[listenerID] = nil
// then let the switch do it's magic
w.EventSwitch.RemoveListener(listenerID)
}
// eventListener is an infinite loop pulling all websocket events
// and pushing them to the EventSwitch.
//
// the goroutine only stops by closing quit
func (w *WSEvents) eventListener() {
for {
select {
case res := <-w.ws.ResultsCh:
// res is json.RawMessage
err := w.parseEvent(res)
if err != nil {
// FIXME: better logging/handling of errors??
fmt.Printf("ws result: %+v\n", err)
}
case err := <-w.ws.ErrorsCh:
// FIXME: better logging/handling of errors??
fmt.Printf("ws err: %+v\n", err)
case <-w.quit:
// send a message so we can wait for the routine to exit
// before cleaning up the w.ws stuff
w.done <- true
return
}
}
}
// parseEvent unmarshals the json message and converts it into
// some implementation of types.TMEventData, and sends it off
// on the merry way to the EventSwitch
func (w *WSEvents) parseEvent(data []byte) (err error) {
result := new(ctypes.TMResult)
wire.ReadJSONPtr(result, data, &err)
if err != nil {
return err
}
event, ok := (*result).(*ctypes.ResultEvent)
if !ok {
// ignore silently (eg. subscribe, unsubscribe and maybe other events)
return nil
}
// looks good! let's fire this baby!
w.EventSwitch.FireEvent(event.Name, event.Data)
return nil
}
// no way of exposing these failures, so we panic.
// is this right? or silently ignore???
func (w *WSEvents) subscribe(event string) {
err := w.ws.Subscribe(event)
if err != nil {
panic(err)
}
}
func (w *WSEvents) unsubscribe(event string) {
err := w.ws.Unsubscribe(event)
if err != nil {
panic(err)
}
}

+ 82
- 0
rpc/client/interface.go View File

@ -0,0 +1,82 @@
/*
package client provides a general purpose interface (Client) for connecting
to a tendermint node, as well as higher-level functionality.
The main implementation for production code is client.HTTP, which
connects via http to the jsonrpc interface of the tendermint node.
For connecting to a node running in the same process (eg. when
compiling the abci app in the same process), you can use the client.Local
implementation.
For mocking out server responses during testing to see behavior for
arbitrary return values, use the mock package.
In addition to the Client interface, which should be used externally
for maximum flexibility and testability, and two implementations,
this package also provides helper functions that work on any Client
implementation.
*/
package client
import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// ABCIClient groups together the functionality that principally
// affects the ABCI app. In many cases this will be all we want,
// so we can accept an interface which is easier to mock
type ABCIClient interface {
// reading from abci app
ABCIInfo() (*ctypes.ResultABCIInfo, error)
ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error)
// writing to abci app
BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
}
// SignClient groups together the interfaces need to get valid
// signatures and prove anything about the chain
type SignClient interface {
Block(height int) (*ctypes.ResultBlock, error)
Commit(height int) (*ctypes.ResultCommit, error)
Validators() (*ctypes.ResultValidators, error)
}
// HistoryClient shows us data from genesis to now in large chunks.
type HistoryClient interface {
Genesis() (*ctypes.ResultGenesis, error)
BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error)
}
type StatusClient interface {
// general chain info
Status() (*ctypes.ResultStatus, error)
}
// Client wraps most important rpc calls a client would make
// if you want to listen for events, test if it also
// implements events.EventSwitch
type Client interface {
ABCIClient
SignClient
HistoryClient
StatusClient
// this Client is reactive, you can subscribe to any TMEventData
// type, given the proper string. see tendermint/types/events.go
types.EventSwitch
}
// NetworkClient is general info about the network state. May not
// be needed usually.
//
// Not included in the Client interface, but generally implemented
// by concrete implementations.
type NetworkClient interface {
NetInfo() (*ctypes.ResultNetInfo, error)
DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
}
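
As a usage sketch (not part of this diff): code written against the Client interface stays independent of the concrete implementation. The address below is a placeholder and the helper function is hypothetical; it only illustrates passing any implementation (HTTP, Local, or a mock) behind the interface.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// statusAndBroadcast works against any Client (HTTP, Local, or a mock).
func statusAndBroadcast(c client.Client, tx types.Tx) error {
	status, err := c.Status()
	if err != nil {
		return err
	}
	fmt.Println("latest height:", status.LatestBlockHeight)
	// fire-and-forget; use BroadcastTxCommit to wait until the tx is in a block
	_, err = c.BroadcastTxAsync(tx)
	return err
}

func main() {
	// client.NewHTTP is the production implementation; the address is illustrative.
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")
	if err := statusAndBroadcast(c, types.Tx("example=1")); err != nil {
		fmt.Println("error:", err)
	}
}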

+ 105
- 0
rpc/client/localclient.go View File

@ -0,0 +1,105 @@
package client
import (
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
/*
Local is a Client implementation that directly executes the rpc
functions on a given node, without going through HTTP or GRPC
This implementation is useful for:
* Running tests against a node in-process without the overhead
of going through an http server
* Communication between an ABCI app and tendermint core when they
are compiled into the same process.
For real clients, you probably want to use client.HTTP. For more
powerful control during testing, you probably want the "client/mock" package.
*/
type Local struct {
node *nm.Node
types.EventSwitch
}
// NewLocal configures a client that calls the Node directly.
//
// Note that given how rpc/core works with package singletons,
// you can only have one node per process. So make sure test cases
// don't run in parallel, or try to simulate an entire network in
// one process...
func NewLocal(node *nm.Node) Local {
node.ConfigureRPC()
return Local{
node: node,
EventSwitch: node.EventSwitch(),
}
}
func (c Local) _assertIsClient() Client {
return c
}
func (c Local) _assertIsNetworkClient() NetworkClient {
return c
}
func (c Local) Status() (*ctypes.ResultStatus, error) {
return core.Status()
}
func (c Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return core.ABCIInfo()
}
func (c Local) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, prove)
}
func (c Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return core.BroadcastTxCommit(tx)
}
func (c Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxAsync(tx)
}
func (c Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxSync(tx)
}
func (c Local) NetInfo() (*ctypes.ResultNetInfo, error) {
return core.NetInfo()
}
func (c Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
return core.DumpConsensusState()
}
func (c Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
return core.UnsafeDialSeeds(seeds)
}
func (c Local) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(minHeight, maxHeight)
}
func (c Local) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Local) Block(height int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Local) Commit(height int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Local) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
}
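
A minimal sketch of the in-process use case described above, assuming a node has already been constructed and started (for example via the rpc/test helpers used in main_test.go below); the helper function is hypothetical.

package client_test

import (
	"fmt"

	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/rpc/client"
)

// useLocalClient queries an in-process node directly, with no HTTP server in between.
func useLocalClient(node *nm.Node) error {
	// NewLocal calls node.ConfigureRPC(), wiring the rpc/core singletons
	// to this node -- hence only one node per process.
	c := client.NewLocal(node)

	status, err := c.Status()
	if err != nil {
		return err
	}
	fmt.Println("chain:", status.NodeInfo.Network, "height:", status.LatestBlockHeight)
	return nil
}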

+ 24
- 0
rpc/client/main_test.go View File

@ -0,0 +1,24 @@
package client_test
import (
"os"
"testing"
meapp "github.com/tendermint/merkleeyes/app"
nm "github.com/tendermint/tendermint/node"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (and merkleeyes) in the background to test against
app := meapp.NewMerkleEyesApp("", 100)
node = rpctest.StartTendermint(app)
code := m.Run()
// and shut down properly at the end
node.Stop()
node.Wait()
os.Exit(code)
}

+ 194
- 0
rpc/client/mock/abci.go View File

@ -0,0 +1,194 @@
package mock
import (
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// ABCIApp will send all abci-related requests to the named app,
// so you can test app behavior from a client without needing
// an entire tendermint node
type ABCIApp struct {
App abci.Application
}
func (a ABCIApp) _assertABCIClient() client.ABCIClient {
return a
}
func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return &ctypes.ResultABCIInfo{a.App.Info()}, nil
}
func (a ABCIApp) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
q := a.App.Query(abci.RequestQuery{data, path, 0, prove})
return &ctypes.ResultABCIQuery{q}, nil
}
func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res := ctypes.ResultBroadcastTxCommit{}
c := a.App.CheckTx(tx)
res.CheckTx = &abci.ResponseCheckTx{c.Code, c.Data, c.Log}
if !c.IsOK() {
return &res, nil
}
d := a.App.DeliverTx(tx)
res.DeliverTx = &abci.ResponseDeliverTx{d.Code, d.Data, d.Log}
return &res, nil
}
func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log}, nil
}
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log}, nil
}
// ABCIMock returns preset responses for the abci-related calls,
// so you can test client behavior for arbitrary return values
// without needing an app or an entire tendermint node
type ABCIMock struct {
Info Call
Query Call
BroadcastCommit Call
Broadcast Call
}
func (m ABCIMock) _assertABCIClient() client.ABCIClient {
return m
}
func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := m.Info.GetResponse(nil)
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
}
func (m ABCIMock) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
res, err := m.Query.GetResponse(QueryArgs{path, data, prove})
if err != nil {
return nil, err
}
return &ctypes.ResultABCIQuery{res.(abci.ResponseQuery)}, nil
}
func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res, err := m.BroadcastCommit.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTxCommit), nil
}
func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := m.Broadcast.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTx), nil
}
func (m ABCIMock) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := m.Broadcast.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTx), nil
}
// ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
// and record all ABCI related calls.
type ABCIRecorder struct {
Client client.ABCIClient
Calls []Call
}
func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder {
return &ABCIRecorder{
Client: client,
Calls: []Call{},
}
}
func (r *ABCIRecorder) _assertABCIClient() client.ABCIClient {
return r
}
type QueryArgs struct {
Path string
Data []byte
Prove bool
}
func (r *ABCIRecorder) addCall(call Call) {
r.Calls = append(r.Calls, call)
}
func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := r.Client.ABCIInfo()
r.addCall(Call{
Name: "abci_info",
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
res, err := r.Client.ABCIQuery(path, data, prove)
r.addCall(Call{
Name: "abci_query",
Args: QueryArgs{path, data, prove},
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res, err := r.Client.BroadcastTxCommit(tx)
r.addCall(Call{
Name: "broadcast_tx_commit",
Args: tx,
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := r.Client.BroadcastTxAsync(tx)
r.addCall(Call{
Name: "broadcast_tx_async",
Args: tx,
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := r.Client.BroadcastTxSync(tx)
r.addCall(Call{
Name: "broadcast_tx_sync",
Args: tx,
Response: res,
Error: err,
})
return res, err
}
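
For illustration, a sketch of how these pieces compose: ABCIApp puts a real ABCI application behind the ABCIClient interface, and ABCIRecorder wraps it so the calls can be inspected afterwards. The test name and the key/value payload are made up.

package mock_test

import (
	"testing"

	"github.com/tendermint/abci/example/dummy"
	"github.com/tendermint/tendermint/rpc/client/mock"
	"github.com/tendermint/tendermint/types"
)

func TestRecordedDummyApp(t *testing.T) {
	// dummy app behind the ABCIClient interface, wrapped by a recorder
	app := mock.ABCIApp{App: dummy.NewDummyApplication()}
	r := mock.NewABCIRecorder(app)

	if _, err := r.BroadcastTxCommit(types.Tx("foo=bar")); err != nil {
		t.Fatal(err)
	}

	// the recorder kept a trace of the call for later assertions
	if len(r.Calls) != 1 || r.Calls[0].Name != "broadcast_tx_commit" {
		t.Fatalf("unexpected call trace: %+v", r.Calls)
	}
}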

+ 169
- 0
rpc/client/mock/abci_test.go View File

@ -0,0 +1,169 @@
package mock_test
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/rpc/client/mock"
)
func TestABCIMock(t *testing.T) {
assert, require := assert.New(t), require.New(t)
key, value := []byte("foo"), []byte("bar")
height := uint64(10)
goodTx := types.Tx{0x01, 0xff}
badTx := types.Tx{0x12, 0x21}
m := mock.ABCIMock{
Info: mock.Call{Error: errors.New("foobar")},
Query: mock.Call{Response: abci.ResponseQuery{
Key: key,
Value: value,
Height: height,
}},
// BroadcastCommit response depends on the args of the call
BroadcastCommit: mock.Call{
Args: goodTx,
Response: &ctypes.ResultBroadcastTxCommit{
CheckTx: &abci.ResponseCheckTx{Data: []byte("stand")},
DeliverTx: &abci.ResponseDeliverTx{Data: []byte("deliver")},
},
Error: errors.New("bad tx"),
},
Broadcast: mock.Call{Error: errors.New("must commit")},
}
// now, let's try to make some calls
_, err := m.ABCIInfo()
require.NotNil(err)
assert.Equal("foobar", err.Error())
// query always returns the response
query, err := m.ABCIQuery("/", nil, false)
require.Nil(err)
require.NotNil(query)
assert.Equal(key, query.Response.GetKey())
assert.Equal(value, query.Response.GetValue())
assert.Equal(height, query.Response.GetHeight())
// non-commit calls always return errors
_, err = m.BroadcastTxSync(goodTx)
require.NotNil(err)
assert.Equal("must commit", err.Error())
_, err = m.BroadcastTxAsync(goodTx)
require.NotNil(err)
assert.Equal("must commit", err.Error())
// commit depends on the input
_, err = m.BroadcastTxCommit(badTx)
require.NotNil(err)
assert.Equal("bad tx", err.Error())
bres, err := m.BroadcastTxCommit(goodTx)
require.Nil(err, "%+v", err)
assert.EqualValues(0, bres.CheckTx.Code)
assert.EqualValues("stand", bres.CheckTx.Data)
assert.EqualValues("deliver", bres.DeliverTx.Data)
}
func TestABCIRecorder(t *testing.T) {
assert, require := assert.New(t), require.New(t)
m := mock.ABCIMock{
Info: mock.Call{Response: abci.ResponseInfo{
Data: "data",
Version: "v0.9.9",
}},
Query: mock.Call{Error: errors.New("query")},
Broadcast: mock.Call{Error: errors.New("broadcast")},
BroadcastCommit: mock.Call{Error: errors.New("broadcast_commit")},
}
r := mock.NewABCIRecorder(m)
require.Equal(0, len(r.Calls))
r.ABCIInfo()
r.ABCIQuery("path", []byte("data"), true)
require.Equal(2, len(r.Calls))
info := r.Calls[0]
assert.Equal("abci_info", info.Name)
assert.Nil(info.Error)
assert.Nil(info.Args)
require.NotNil(info.Response)
ir, ok := info.Response.(*ctypes.ResultABCIInfo)
require.True(ok)
assert.Equal("data", ir.Response.Data)
assert.Equal("v0.9.9", ir.Response.Version)
query := r.Calls[1]
assert.Equal("abci_query", query.Name)
assert.Nil(query.Response)
require.NotNil(query.Error)
assert.Equal("query", query.Error.Error())
require.NotNil(query.Args)
qa, ok := query.Args.(mock.QueryArgs)
require.True(ok)
assert.Equal("path", qa.Path)
assert.EqualValues("data", qa.Data)
assert.True(qa.Prove)
// now add some broadcasts
txs := []types.Tx{{1}, {2}, {3}}
r.BroadcastTxCommit(txs[0])
r.BroadcastTxSync(txs[1])
r.BroadcastTxAsync(txs[2])
require.Equal(5, len(r.Calls))
bc := r.Calls[2]
assert.Equal("broadcast_tx_commit", bc.Name)
assert.Nil(bc.Response)
require.NotNil(bc.Error)
assert.EqualValues(bc.Args, txs[0])
bs := r.Calls[3]
assert.Equal("broadcast_tx_sync", bs.Name)
assert.Nil(bs.Response)
require.NotNil(bs.Error)
assert.EqualValues(bs.Args, txs[1])
ba := r.Calls[4]
assert.Equal("broadcast_tx_async", ba.Name)
assert.Nil(ba.Response)
require.NotNil(ba.Error)
assert.EqualValues(ba.Args, txs[2])
}
func TestABCIApp(t *testing.T) {
assert, require := assert.New(t), require.New(t)
app := dummy.NewDummyApplication()
m := mock.ABCIApp{app}
// get some info
info, err := m.ABCIInfo()
require.Nil(err)
assert.Equal(`{"size":0}`, info.Response.GetData())
// add a key
key, value := "foo", "bar"
tx := fmt.Sprintf("%s=%s", key, value)
res, err := m.BroadcastTxCommit(types.Tx(tx))
require.Nil(err)
assert.True(res.CheckTx.Code.IsOK())
require.NotNil(res.DeliverTx)
assert.True(res.DeliverTx.Code.IsOK())
// check the key
qres, err := m.ABCIQuery("/key", []byte(key), false)
require.Nil(err)
assert.EqualValues(value, qres.Response.Value)
}

+ 128
- 0
rpc/client/mock/client.go View File

@ -0,0 +1,128 @@
/*
Package mock provides a Client implementation that
accepts arbitrary (mock) implementations of its various methods.
This implementation is useful in tests, when you don't
need a real server, but want a high level of control over
the server responses you want to mock (eg. error handling),
or if you just want to record the calls to verify in your tests.
For real clients, you probably want the "http" package. If you
want to directly call a tendermint node in process, you can use the
"local" package.
*/
package mock
import (
"reflect"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// Client wraps arbitrary implementations of the various interfaces.
//
// We provide a few choices to mock out each one in this package.
// Nothing hidden here, so no New function, just construct it from
// some parts, and swap them out during the tests.
type Client struct {
client.ABCIClient
client.SignClient
client.HistoryClient
client.StatusClient
// create a mock with types.NewEventSwitch()
types.EventSwitch
}
func (c Client) _assertIsClient() client.Client {
return c
}
// Call is used by recorders to save a call and response.
// It can also be used to configure mock responses.
//
type Call struct {
Name string
Args interface{}
Response interface{}
Error error
}
// GetResponse will generate the appropriate response for us, when
// using the Call struct to configure a Mock handler.
//
// When configuring a response, if only one of Response or Error is
// set then that will always be returned. If both are set, then
// we return Response if the Args match the set args, Error otherwise.
func (c Call) GetResponse(args interface{}) (interface{}, error) {
// handle the case with no response
if c.Response == nil {
if c.Error == nil {
panic("Misconfigured call, you must set either Response or Error")
}
return nil, c.Error
}
// response without error
if c.Error == nil {
return c.Response, nil
}
// have both, we must check args....
if reflect.DeepEqual(args, c.Args) {
return c.Response, nil
}
return nil, c.Error
}
func (c Client) Status() (*ctypes.ResultStatus, error) {
return core.Status()
}
func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return core.ABCIInfo()
}
func (c Client) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, prove)
}
func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return core.BroadcastTxCommit(tx)
}
func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxAsync(tx)
}
func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxSync(tx)
}
func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) {
return core.NetInfo()
}
func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
return core.UnsafeDialSeeds(seeds)
}
func (c Client) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(minHeight, maxHeight)
}
func (c Client) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Client) Block(height int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Client) Commit(height int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Client) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
}
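
To make the GetResponse semantics described above concrete, here is a small sketch (the values are arbitrary): when both Response and Error are set, the args decide which one comes back.

package mock_test

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client/mock"
)

func ExampleCall_GetResponse() {
	c := mock.Call{
		Args:     "expected args",
		Response: "the response",
		Error:    fmt.Errorf("wrong args"),
	}

	res, err := c.GetResponse("expected args") // matches Args -> Response
	fmt.Println(res, err)

	res, err = c.GetResponse("other args") // mismatch -> Error
	fmt.Println(res, err)

	// Output:
	// the response <nil>
	// <nil> wrong args
}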

+ 55
- 0
rpc/client/mock/status.go View File

@ -0,0 +1,55 @@
package mock
import (
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
// StatusMock returns the result specified by the Call
type StatusMock struct {
Call
}
func (m *StatusMock) _assertStatusClient() client.StatusClient {
return m
}
func (m *StatusMock) Status() (*ctypes.ResultStatus, error) {
res, err := m.GetResponse(nil)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultStatus), nil
}
// StatusRecorder can wrap another type (StatusMock, full client)
// and record the status calls
type StatusRecorder struct {
Client client.StatusClient
Calls []Call
}
func NewStatusRecorder(client client.StatusClient) *StatusRecorder {
return &StatusRecorder{
Client: client,
Calls: []Call{},
}
}
func (r *StatusRecorder) _assertStatusClient() client.StatusClient {
return r
}
func (r *StatusRecorder) addCall(call Call) {
r.Calls = append(r.Calls, call)
}
func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) {
res, err := r.Client.Status()
r.addCall(Call{
Name: "status",
Response: res,
Error: err,
})
return res, err
}

+ 45
- 0
rpc/client/mock/status_test.go View File

@ -0,0 +1,45 @@
package mock_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/rpc/client/mock"
)
func TestStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
m := &mock.StatusMock{
Call: mock.Call{
Response: &ctypes.ResultStatus{
LatestBlockHash: []byte("block"),
LatestAppHash: []byte("app"),
LatestBlockHeight: 10,
}},
}
r := mock.NewStatusRecorder(m)
require.Equal(0, len(r.Calls))
// make sure the response works properly
status, err := r.Status()
require.Nil(err, "%+v", err)
assert.EqualValues("block", status.LatestBlockHash)
assert.EqualValues(10, status.LatestBlockHeight)
// make sure recorder works properly
require.Equal(1, len(r.Calls))
rs := r.Calls[0]
assert.Equal("status", rs.Name)
assert.Nil(rs.Args)
assert.Nil(rs.Error)
require.NotNil(rs.Response)
st, ok := rs.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.EqualValues("block", st.LatestBlockHash)
assert.EqualValues(10, st.LatestBlockHeight)
}

+ 179
- 0
rpc/client/rpc_test.go View File

@ -0,0 +1,179 @@
package client_test
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
merkle "github.com/tendermint/go-merkle"
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
func getHTTPClient() *client.HTTP {
rpcAddr := rpctest.GetConfig().GetString("rpc_laddr")
return client.NewHTTP(rpcAddr, "/websocket")
}
func getLocalClient() client.Local {
return client.NewLocal(node)
}
// GetClients returns a slice of clients for table-driven tests
func GetClients() []client.Client {
return []client.Client{
getHTTPClient(),
getLocalClient(),
}
}
// Make sure status is correct (we connect properly)
func TestStatus(t *testing.T) {
for i, c := range GetClients() {
chainID := rpctest.GetConfig().GetString("chain_id")
status, err := c.Status()
require.Nil(t, err, "%d: %+v", i, err)
assert.Equal(t, chainID, status.NodeInfo.Network)
}
}
// Make sure info is correct (we connect properly)
func TestInfo(t *testing.T) {
for i, c := range GetClients() {
// status, err := c.Status()
// require.Nil(t, err, "%+v", err)
info, err := c.ABCIInfo()
require.Nil(t, err, "%d: %+v", i, err)
// TODO: this is not correct - fix merkleeyes!
// assert.EqualValues(t, status.LatestBlockHeight, info.Response.LastBlockHeight)
assert.True(t, strings.HasPrefix(info.Response.Data, "size"))
}
}
func TestNetInfo(t *testing.T) {
for i, c := range GetClients() {
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
netinfo, err := nc.NetInfo()
require.Nil(t, err, "%d: %+v", i, err)
assert.True(t, netinfo.Listening)
assert.Equal(t, 0, len(netinfo.Peers))
}
}
func TestDumpConsensusState(t *testing.T) {
for i, c := range GetClients() {
// FIXME: fix server so it doesn't panic on invalid input
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
cons, err := nc.DumpConsensusState()
require.Nil(t, err, "%d: %+v", i, err)
assert.NotEmpty(t, cons.RoundState)
assert.Empty(t, cons.PeerRoundStates)
}
}
func TestGenesisAndValidators(t *testing.T) {
for i, c := range GetClients() {
chainID := rpctest.GetConfig().GetString("chain_id")
// make sure this is the right genesis file
gen, err := c.Genesis()
require.Nil(t, err, "%d: %+v", i, err)
assert.Equal(t, chainID, gen.Genesis.ChainID)
// get the genesis validator
require.Equal(t, 1, len(gen.Genesis.Validators))
gval := gen.Genesis.Validators[0]
// get the current validators
vals, err := c.Validators()
require.Nil(t, err, "%d: %+v", i, err)
require.Equal(t, 1, len(vals.Validators))
val := vals.Validators[0]
// make sure the current set is also the genesis set
assert.Equal(t, gval.Amount, val.VotingPower)
assert.Equal(t, gval.PubKey, val.PubKey)
}
}
// Make some app checks
func TestAppCalls(t *testing.T) {
assert, require := assert.New(t), require.New(t)
for i, c := range GetClients() {
// get an offset of height to avoid racing and guessing
s, err := c.Status()
require.Nil(err, "%d: %+v", i, err)
// sh is start height or status height
sh := s.LatestBlockHeight
// look for the future
_, err = c.Block(sh + 2)
assert.NotNil(err) // no block yet
// write something
k, v, tx := merktest.MakeTxKV()
_, err = c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err)
// wait before querying
time.Sleep(time.Second * 1)
qres, err := c.ABCIQuery("/key", k, false)
if assert.Nil(err) && assert.True(qres.Response.Code.IsOK()) {
data := qres.Response
// assert.Equal(k, data.GetKey()) // only returned for proofs
assert.Equal(v, data.GetValue())
}
// +/- 1 making my head hurt
h := int(qres.Response.Height) - 1
// and we can even check the block is added
block, err := c.Block(h)
require.Nil(err, "%d: %+v", i, err)
appHash := block.BlockMeta.Header.AppHash
assert.True(len(appHash) > 0)
assert.EqualValues(h, block.BlockMeta.Header.Height)
// check blockchain info, now that we know there is info
// TODO: is it documented somewhere that they are returned
// in order of descending height???
info, err := c.BlockchainInfo(h-2, h)
require.Nil(err, "%d: %+v", i, err)
assert.True(info.LastHeight > 2)
if assert.Equal(3, len(info.BlockMetas)) {
lastMeta := info.BlockMetas[0]
assert.EqualValues(h, lastMeta.Header.Height)
bMeta := block.BlockMeta
assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash)
assert.Equal(bMeta.BlockID, lastMeta.BlockID)
}
// and get the corresponding commit with the same apphash
commit, err := c.Commit(h)
require.Nil(err, "%d: %+v", i, err)
cappHash := commit.Header.AppHash
assert.Equal(appHash, cappHash)
assert.NotNil(commit.Commit)
// compare the commits (note Commit(2) has commit from Block(3))
commit2, err := c.Commit(h - 1)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(block.Block.LastCommit, commit2.Commit)
// and we got a proof that works!
pres, err := c.ABCIQuery("/key", k, true)
if assert.Nil(err) && assert.True(pres.Response.Code.IsOK()) {
proof, err := merkle.ReadProof(pres.Response.GetProof())
if assert.Nil(err) {
key := pres.Response.GetKey()
value := pres.Response.GetValue()
assert.Equal(appHash, proof.RootHash)
valid := proof.Verify(key, value, appHash)
assert.True(valid)
}
}
}
}

+ 14
- 10
rpc/core/abci.go View File

@ -1,25 +1,29 @@
package core
import (
abci "github.com/tendermint/abci/types"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
//-----------------------------------------------------------------------------
func ABCIQuery(query []byte) (*ctypes.ResultABCIQuery, error) {
res := proxyAppQuery.QuerySync(query)
return &ctypes.ResultABCIQuery{res}, nil
func ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{
Path: path,
Data: data,
Prove: prove,
})
if err != nil {
return nil, err
}
log.Info("ABCIQuery", "path", path, "data", data, "result", resQuery)
return &ctypes.ResultABCIQuery{resQuery}, nil
}
func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := proxyAppQuery.InfoSync()
resInfo, err := proxyAppQuery.InfoSync()
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{
Data: res.Data,
Version: res.Version,
LastBlockHeight: res.LastBlockHeight,
LastBlockAppHash: res.LastBlockAppHash,
}, nil
return &ctypes.ResultABCIInfo{resInfo}, nil
}

+ 25
- 0
rpc/core/blocks.go View File

@ -44,3 +44,28 @@ func Block(height int) (*ctypes.ResultBlock, error) {
block := blockStore.LoadBlock(height)
return &ctypes.ResultBlock{blockMeta, block}, nil
}
//-----------------------------------------------------------------------------
func Commit(height int) (*ctypes.ResultCommit, error) {
if height == 0 {
return nil, fmt.Errorf("Height must be greater than 0")
}
storeHeight := blockStore.Height()
if height > storeHeight {
return nil, fmt.Errorf("Height must be less than or equal to the current blockchain height")
}
header := blockStore.LoadBlockMeta(height).Header
// If the next block has not been committed yet,
// use a non-canonical commit
if height == storeHeight {
commit := blockStore.LoadSeenCommit(height)
return &ctypes.ResultCommit{header, commit, false}, nil
}
// Return the canonical commit (comes from the block at height+1)
commit := blockStore.LoadBlockCommit(height)
return &ctypes.ResultCommit{header, commit, true}, nil
}
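
A short sketch of this distinction from the client side, using the new rpc/client package introduced in this PR (the address is a placeholder): the commit for the latest height is usually the non-canonical "seen" commit, while older heights return the canonical commit taken from the following block.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")

	status, err := c.Status()
	if err != nil {
		fmt.Println("status error:", err)
		return
	}

	res, err := c.Commit(status.LatestBlockHeight)
	if err != nil {
		fmt.Println("commit error:", err)
		return
	}
	// false at the chain tip (seen commit), true once the next block exists
	fmt.Println("canonical:", res.CanonicalCommit)
}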

+ 3
- 2
rpc/core/net.go View File

@ -32,8 +32,9 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
// Dial given list of seeds
func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
// starts go routines to dial each seed after random delays
p2pSwitch.DialSeeds(seeds)
return &ctypes.ResultDialSeeds{}, nil
log.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
err := p2pSwitch.DialSeeds(addrBook, seeds)
return &ctypes.ResultDialSeeds{}, err
}
//-----------------------------------------------------------------------------


+ 20
- 28
rpc/core/pipe.go View File

@ -8,55 +8,43 @@ import (
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
//-----------------------------------------------------
// Interfaces for use by RPC
// NOTE: these methods must be thread safe!
type BlockStore interface {
Height() int
LoadBlockMeta(height int) *types.BlockMeta
LoadBlock(height int) *types.Block
}
//----------------------------------------------
// These interfaces are used by RPC and must be thread safe
type Consensus interface {
GetValidators() (int, []*types.Validator)
GetRoundState() *consensus.RoundState
}
type Mempool interface {
Size() int
CheckTx(types.Tx, func(*abci.Response)) error
Reap(int) []types.Tx
Flush()
}
type P2P interface {
Listeners() []p2p.Listener
Peers() p2p.IPeerSet
NumPeers() (outbound, inbound, dialing int)
NodeInfo() *p2p.NodeInfo
IsListening() bool
DialSeeds([]string)
DialSeeds(*p2p.AddrBook, []string) error
}
//----------------------------------------------
var (
// external, thread safe interfaces
eventSwitch types.EventSwitch
proxyAppQuery proxy.AppConnQuery
config cfg.Config
// interfaces defined above
blockStore BlockStore
// interfaces defined in types and above
blockStore types.BlockStore
mempool types.Mempool
consensusState Consensus
mempool Mempool
p2pSwitch P2P
// objects
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
addrBook *p2p.AddrBook
)
func SetConfig(c cfg.Config) {
@ -67,16 +55,16 @@ func SetEventSwitch(evsw types.EventSwitch) {
eventSwitch = evsw
}
func SetBlockStore(bs BlockStore) {
func SetBlockStore(bs types.BlockStore) {
blockStore = bs
}
func SetConsensusState(cs Consensus) {
consensusState = cs
func SetMempool(mem types.Mempool) {
mempool = mem
}
func SetMempool(mem Mempool) {
mempool = mem
func SetConsensusState(cs Consensus) {
consensusState = cs
}
func SetSwitch(sw P2P) {
@ -91,6 +79,10 @@ func SetGenesisDoc(doc *types.GenesisDoc) {
genDoc = doc
}
func SetAddrBook(book *p2p.AddrBook) {
addrBook = book
}
func SetProxyAppQuery(appConn proxy.AppConnQuery) {
proxyAppQuery = appConn
}

+ 12
- 3
rpc/core/routes.go View File

@ -18,6 +18,7 @@ var Routes = map[string]*rpc.RPCFunc{
"blockchain": rpc.NewRPCFunc(BlockchainInfoResult, "minHeight,maxHeight"),
"genesis": rpc.NewRPCFunc(GenesisResult, ""),
"block": rpc.NewRPCFunc(BlockResult, "height"),
"commit": rpc.NewRPCFunc(CommitResult, "height"),
"validators": rpc.NewRPCFunc(ValidatorsResult, ""),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusStateResult, ""),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxsResult, ""),
@ -29,7 +30,7 @@ var Routes = map[string]*rpc.RPCFunc{
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsyncResult, "tx"),
// abci API
"abci_query": rpc.NewRPCFunc(ABCIQueryResult, "query"),
"abci_query": rpc.NewRPCFunc(ABCIQueryResult, "path,data,prove"),
"abci_info": rpc.NewRPCFunc(ABCIInfoResult, ""),
// control API
@ -107,6 +108,14 @@ func BlockResult(height int) (ctypes.TMResult, error) {
}
}
func CommitResult(height int) (ctypes.TMResult, error) {
if r, err := Commit(height); err != nil {
return nil, err
} else {
return r, nil
}
}
func ValidatorsResult() (ctypes.TMResult, error) {
if r, err := Validators(); err != nil {
return nil, err
@ -163,8 +172,8 @@ func BroadcastTxAsyncResult(tx []byte) (ctypes.TMResult, error) {
}
}
func ABCIQueryResult(query []byte) (ctypes.TMResult, error) {
if r, err := ABCIQuery(query); err != nil {
func ABCIQueryResult(path string, data []byte, prove bool) (ctypes.TMResult, error) {
if r, err := ABCIQuery(path, data, prove); err != nil {
return nil, err
} else {
return r, nil


+ 1
- 1
rpc/core/status.go View File

@ -15,7 +15,7 @@ func Status() (*ctypes.ResultStatus, error) {
)
if latestHeight != 0 {
latestBlockMeta = blockStore.LoadBlockMeta(latestHeight)
latestBlockHash = latestBlockMeta.Hash
latestBlockHash = latestBlockMeta.BlockID.Hash
latestAppHash = latestBlockMeta.Header.AppHash
latestBlockTime = latestBlockMeta.Header.Time.UnixNano()
}


+ 12
- 7
rpc/core/types/responses.go View File

@ -1,12 +1,12 @@
package core_types
import (
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-rpc/types"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
type ResultBlockchainInfo struct {
@ -23,6 +23,12 @@ type ResultBlock struct {
Block *types.Block `json:"block"`
}
type ResultCommit struct {
Header *types.Header `json:"header"`
Commit *types.Commit `json:"commit"`
CanonicalCommit bool `json:"canonical"`
}
type ResultStatus struct {
NodeInfo *p2p.NodeInfo `json:"node_info"`
PubKey crypto.PubKey `json:"pub_key"`
@ -64,7 +70,7 @@ type ResultBroadcastTx struct {
}
type ResultBroadcastTxCommit struct {
CheckTx *abci.ResponseCheckTx `json:"check_tx"`
CheckTx *abci.ResponseCheckTx `json:"check_tx"`
DeliverTx *abci.ResponseDeliverTx `json:"deliver_tx"`
}
@ -74,14 +80,11 @@ type ResultUnconfirmedTxs struct {
}
type ResultABCIInfo struct {
Data string `json:"data"`
Version string `json:"version"`
LastBlockHeight uint64 `json:"last_block_height"`
LastBlockAppHash []byte `json:"last_block_app_hash"`
Response abci.ResponseInfo `json:"response"`
}
type ResultABCIQuery struct {
Result abci.Result `json:"result"`
Response abci.ResponseQuery `json:"response"`
}
type ResultUnsafeFlushMempool struct{}
@ -109,6 +112,7 @@ const (
ResultTypeGenesis = byte(0x01)
ResultTypeBlockchainInfo = byte(0x02)
ResultTypeBlock = byte(0x03)
ResultTypeCommit = byte(0x04)
// 0x2 bytes are for the network
ResultTypeStatus = byte(0x20)
@ -151,6 +155,7 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&ResultGenesis{}, ResultTypeGenesis},
wire.ConcreteType{&ResultBlockchainInfo{}, ResultTypeBlockchainInfo},
wire.ConcreteType{&ResultBlock{}, ResultTypeBlock},
wire.ConcreteType{&ResultCommit{}, ResultTypeCommit},
wire.ConcreteType{&ResultStatus{}, ResultTypeStatus},
wire.ConcreteType{&ResultNetInfo{}, ResultTypeNetInfo},
wire.ConcreteType{&ResultDialSeeds{}, ResultTypeDialSeeds},


+ 76
- 135
rpc/test/client_test.go View File

@ -8,12 +8,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
)
//--------------------------------------------------------------------------------
@ -26,42 +26,35 @@ import (
func TestURIStatus(t *testing.T) {
tmResult := new(ctypes.TMResult)
_, err := clientURI.Call("status", map[string]interface{}{}, tmResult)
if err != nil {
panic(err)
}
_, err := GetURIClient().Call("status", map[string]interface{}{}, tmResult)
require.Nil(t, err)
testStatus(t, tmResult)
}
func TestJSONStatus(t *testing.T) {
tmResult := new(ctypes.TMResult)
_, err := clientJSON.Call("status", []interface{}{}, tmResult)
if err != nil {
panic(err)
}
_, err := GetJSONClient().Call("status", []interface{}{}, tmResult)
require.Nil(t, err)
testStatus(t, tmResult)
}
func testStatus(t *testing.T, statusI interface{}) {
chainID := GetConfig().GetString("chain_id")
tmRes := statusI.(*ctypes.TMResult)
status := (*tmRes).(*ctypes.ResultStatus)
if status.NodeInfo.Network != chainID {
panic(Fmt("ChainID mismatch: got %s expected %s",
status.NodeInfo.Network, chainID))
}
assert.Equal(t, chainID, status.NodeInfo.Network)
}
//--------------------------------------------------------------------------------
// broadcast tx sync
// random bytes (excluding byte('='))
func randBytes() []byte {
func randBytes(t *testing.T) []byte {
n := rand.Intn(10) + 2
buf := make([]byte, n)
_, err := crand.Read(buf)
if err != nil {
panic(err)
}
require.Nil(t, err)
return bytes.Replace(buf, []byte("="), []byte{100}, -1)
}
@ -69,11 +62,9 @@ func TestURIBroadcastTxSync(t *testing.T) {
config.Set("block_size", 0)
defer config.Set("block_size", -1)
tmResult := new(ctypes.TMResult)
tx := randBytes()
_, err := clientURI.Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
panic(err)
}
tx := randBytes(t)
_, err := GetURIClient().Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err)
testBroadcastTxSync(t, tmResult, tx)
}
@ -81,91 +72,64 @@ func TestJSONBroadcastTxSync(t *testing.T) {
config.Set("block_size", 0)
defer config.Set("block_size", -1)
tmResult := new(ctypes.TMResult)
tx := randBytes()
_, err := clientJSON.Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
if err != nil {
panic(err)
}
tx := randBytes(t)
_, err := GetJSONClient().Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
require.Nil(t, err)
testBroadcastTxSync(t, tmResult, tx)
}
func testBroadcastTxSync(t *testing.T, resI interface{}, tx []byte) {
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTx)
if res.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxSync got non-zero exit code: %v. %X; %s", res.Code, res.Data, res.Log))
}
require.Equal(t, abci.CodeType_OK, res.Code)
mem := node.MempoolReactor().Mempool
if mem.Size() != 1 {
panic(Fmt("Mempool size should have been 1. Got %d", mem.Size()))
}
require.Equal(t, 1, mem.Size())
txs := mem.Reap(1)
if !bytes.Equal(txs[0], tx) {
panic(Fmt("Tx in mempool does not match test tx. Got %X, expected %X", txs[0], tx))
}
require.EqualValues(t, tx, txs[0])
mem.Flush()
}
//--------------------------------------------------------------------------------
// query
func testTxKV() ([]byte, []byte, []byte) {
k := randBytes()
v := randBytes()
func testTxKV(t *testing.T) ([]byte, []byte, []byte) {
k := randBytes(t)
v := randBytes(t)
return k, v, []byte(Fmt("%s=%s", k, v))
}
func sendTx() ([]byte, []byte) {
func sendTx(t *testing.T) ([]byte, []byte) {
tmResult := new(ctypes.TMResult)
k, v, tx := testTxKV()
_, err := clientJSON.Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
if err != nil {
panic(err)
}
fmt.Println("SENT TX", tx)
fmt.Printf("SENT TX %X\n", tx)
fmt.Printf("k %X; v %X", k, v)
k, v, tx := testTxKV(t)
_, err := GetJSONClient().Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
require.Nil(t, err)
return k, v
}
func TestURIABCIQuery(t *testing.T) {
k, v := sendTx()
k, v := sendTx(t)
time.Sleep(time.Second)
tmResult := new(ctypes.TMResult)
_, err := clientURI.Call("abci_query", map[string]interface{}{"query": k}, tmResult)
if err != nil {
panic(err)
}
_, err := GetURIClient().Call("abci_query", map[string]interface{}{"path": "", "data": k, "prove": false}, tmResult)
require.Nil(t, err)
testABCIQuery(t, tmResult, v)
}
func TestJSONABCIQuery(t *testing.T) {
k, v := sendTx()
k, v := sendTx(t)
tmResult := new(ctypes.TMResult)
_, err := clientJSON.Call("abci_query", []interface{}{k}, tmResult)
if err != nil {
panic(err)
}
_, err := GetJSONClient().Call("abci_query", []interface{}{"", k, false}, tmResult)
require.Nil(t, err)
testABCIQuery(t, tmResult, v)
}
func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
tmRes := statusI.(*ctypes.TMResult)
query := (*tmRes).(*ctypes.ResultABCIQuery)
if query.Result.IsErr() {
panic(Fmt("Query returned an err: %v", query))
}
resQuery := (*tmRes).(*ctypes.ResultABCIQuery)
require.EqualValues(t, 0, resQuery.Response.Code)
qResult := new(dummy.QueryResult)
if err := wire.ReadJSONBytes(query.Result.Data, qResult); err != nil {
t.Fatal(err)
}
// XXX: specific to value returned by the dummy
if qResult.Exists != true {
panic(Fmt("Query error. Expected to find 'exists=true'. Got: %v", qResult))
}
require.NotEqual(t, 0, len(resQuery.Response.Value))
}
//--------------------------------------------------------------------------------
@ -173,40 +137,30 @@ func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
func TestURIBroadcastTxCommit(t *testing.T) {
tmResult := new(ctypes.TMResult)
tx := randBytes()
_, err := clientURI.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
panic(err)
}
tx := randBytes(t)
_, err := GetURIClient().Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err)
testBroadcastTxCommit(t, tmResult, tx)
}
func TestJSONBroadcastTxCommit(t *testing.T) {
tmResult := new(ctypes.TMResult)
tx := randBytes()
_, err := clientJSON.Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
if err != nil {
panic(err)
}
tx := randBytes(t)
_, err := GetJSONClient().Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
require.Nil(t, err)
testBroadcastTxCommit(t, tmResult, tx)
}
func testBroadcastTxCommit(t *testing.T, resI interface{}, tx []byte) {
require := require.New(t)
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTxCommit)
checkTx := res.CheckTx
if checkTx.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxCommit got non-zero exit code from CheckTx: %v. %X; %s", checkTx.Code, checkTx.Data, checkTx.Log))
}
require.Equal(abci.CodeType_OK, checkTx.Code)
deliverTx := res.DeliverTx
if deliverTx.Code != abci.CodeType_OK {
panic(Fmt("BroadcastTxCommit got non-zero exit code from CheckTx: %v. %X; %s", deliverTx.Code, deliverTx.Data, deliverTx.Log))
}
require.Equal(abci.CodeType_OK, deliverTx.Code)
mem := node.MempoolReactor().Mempool
if mem.Size() != 0 {
panic(Fmt("Mempool size should have been 0. Got %d", mem.Size()))
}
require.Equal(0, mem.Size())
// TODO: find tx in block
}
@ -217,21 +171,22 @@ var wsTyp = "JSONRPC"
// make a simple connection to the server
func TestWSConnect(t *testing.T) {
wsc := newWSClient(t)
wsc := GetWSClient()
wsc.Stop()
}
// receive a new block message
func TestWSNewBlock(t *testing.T) {
wsc := newWSClient(t)
wsc := GetWSClient()
eid := types.EventStringNewBlock()
subscribe(t, wsc, eid)
require.Nil(t, wsc.Subscribe(eid))
defer func() {
unsubscribe(t, wsc, eid)
require.Nil(t, wsc.Unsubscribe(eid))
wsc.Stop()
}()
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
fmt.Println("Check:", b)
// fmt.Println("Check:", b)
return nil
})
}
@ -241,11 +196,12 @@ func TestWSBlockchainGrowth(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
wsc := newWSClient(t)
wsc := GetWSClient()
eid := types.EventStringNewBlock()
subscribe(t, wsc, eid)
require.Nil(t, wsc.Subscribe(eid))
defer func() {
unsubscribe(t, wsc, eid)
require.Nil(t, wsc.Unsubscribe(eid))
wsc.Stop()
}()
@ -269,35 +225,29 @@ func TestWSBlockchainGrowth(t *testing.T) {
}
func TestWSTxEvent(t *testing.T) {
wsc := newWSClient(t)
tx := randBytes()
require := require.New(t)
wsc := GetWSClient()
tx := randBytes(t)
// listen for the tx I am about to submit
eid := types.EventStringTx(types.Tx(tx))
subscribe(t, wsc, eid)
require.Nil(wsc.Subscribe(eid))
defer func() {
unsubscribe(t, wsc, eid)
require.Nil(wsc.Unsubscribe(eid))
wsc.Stop()
}()
// send a tx
tmResult := new(ctypes.TMResult)
_, err := clientJSON.Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
if err != nil {
t.Fatal("Error submitting event")
}
_, err := GetJSONClient().Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
require.Nil(err)
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
evt, ok := b.(types.EventDataTx)
if !ok {
t.Fatal("Got wrong event type", b)
}
if bytes.Compare([]byte(evt.Tx), tx) != 0 {
t.Error("Event returned different tx")
}
if evt.Code != abci.CodeType_OK {
t.Error("Event returned tx error code", evt.Code)
}
require.True(ok, "Got wrong event type: %#v", b)
require.Equal(tx, []byte(evt.Tx), "Returned different tx")
require.Equal(abci.CodeType_OK, evt.Code)
return nil
})
}
@ -347,14 +297,12 @@ var testCasesUnsafeSetConfig = [][]string{
func TestURIUnsafeSetConfig(t *testing.T) {
for _, testCase := range testCasesUnsafeSetConfig {
tmResult := new(ctypes.TMResult)
_, err := clientURI.Call("unsafe_set_config", map[string]interface{}{
_, err := GetURIClient().Call("unsafe_set_config", map[string]interface{}{
"type": testCase[0],
"key": testCase[1],
"value": testCase[2],
}, tmResult)
if err != nil {
panic(err)
}
require.Nil(t, err)
}
testUnsafeSetConfig(t)
}
@ -362,27 +310,20 @@ func TestURIUnsafeSetConfig(t *testing.T) {
func TestJSONUnsafeSetConfig(t *testing.T) {
for _, testCase := range testCasesUnsafeSetConfig {
tmResult := new(ctypes.TMResult)
_, err := clientJSON.Call("unsafe_set_config", []interface{}{testCase[0], testCase[1], testCase[2]}, tmResult)
if err != nil {
panic(err)
}
_, err := GetJSONClient().Call("unsafe_set_config", []interface{}{testCase[0], testCase[1], testCase[2]}, tmResult)
require.Nil(t, err)
}
testUnsafeSetConfig(t)
}
func testUnsafeSetConfig(t *testing.T) {
require := require.New(t)
s := config.GetString("key1")
if s != stringVal {
panic(Fmt("got %v, expected %v", s, stringVal))
}
require.Equal(stringVal, s)
i := config.GetInt("key2")
if i != intVal {
panic(Fmt("got %v, expected %v", i, intVal))
}
require.Equal(intVal, i)
b := config.GetBool("key3")
if b != boolVal {
panic(Fmt("got %v, expected %v", b, boolVal))
}
require.Equal(boolVal, b)
}

+ 6
- 10
rpc/test/grpc_test.go View File

@ -5,20 +5,16 @@ import (
"golang.org/x/net/context"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/rpc/grpc"
)
//-------------------------------------------
func TestBroadcastTx(t *testing.T) {
res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
if err != nil {
t.Fatal(err)
}
if res.CheckTx.Code != 0 {
t.Fatalf("Non-zero check tx code: %d", res.CheckTx.Code)
}
if res.DeliverTx.Code != 0 {
t.Fatalf("Non-zero append tx code: %d", res.DeliverTx.Code)
}
require := require.New(t)
res, err := GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
require.Nil(err)
require.EqualValues(0, res.CheckTx.Code)
require.EqualValues(0, res.DeliverTx.Code)
}

+ 90
- 73
rpc/test/helpers.go View File

@ -1,104 +1,124 @@
package rpctest
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/stretchr/testify/require"
logger "github.com/tendermint/go-logger"
wire "github.com/tendermint/go-wire"
abci "github.com/tendermint/abci/types"
cfg "github.com/tendermint/go-config"
client "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/config/tendermint_test"
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/rpc/grpc"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
"github.com/tendermint/tendermint/types"
)
// global variables for use across all tests
var (
config cfg.Config
node *nm.Node
chainID string
rpcAddr string
requestAddr string
websocketAddr string
websocketEndpoint string
grpcAddr string
clientURI *client.ClientURI
clientJSON *client.ClientJSONRPC
clientGRPC core_grpc.BroadcastAPIClient
config cfg.Config
)
// initialize config and create new node
func init() {
config = tendermint_test.ResetConfig("rpc_test_client_test")
chainID = config.GetString("chain_id")
rpcAddr = config.GetString("rpc_laddr")
grpcAddr = config.GetString("grpc_laddr")
requestAddr = rpcAddr
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
clientURI = client.NewClientURI(requestAddr)
clientJSON = client.NewClientJSONRPC(requestAddr)
clientGRPC = core_grpc.StartGRPCClient(grpcAddr)
// TODO: change consensus/state.go timeouts to be shorter
// start a node
ready := make(chan struct{})
go newNode(ready)
<-ready
const tmLogLevel = "error"
// f**ing long, but unique for each test
func makePathname() string {
// get path
p, err := os.Getwd()
if err != nil {
panic(err)
}
fmt.Println(p)
sep := string(filepath.Separator)
return strings.Replace(p, sep, "_", -1)
}
// create a new node and sleep forever
func newNode(ready chan struct{}) {
// Create & start node
node = nm.NewNodeDefault(config)
protocol, address := nm.ProtocolAndAddress(config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, true)
node.AddListener(l)
node.Start()
func randPort() int {
// returns between base and base + spread
base, spread := 20000, 20000
return base + rand.Intn(spread)
}
// Run the RPC server.
node.StartRPC()
time.Sleep(time.Second)
func makeAddrs() (string, string, string) {
start := randPort()
return fmt.Sprintf("tcp://0.0.0.0:%d", start),
fmt.Sprintf("tcp://0.0.0.0:%d", start+1),
fmt.Sprintf("tcp://0.0.0.0:%d", start+2)
}
ready <- struct{}{}
// GetConfig returns a config for the test cases as a singleton
func GetConfig() cfg.Config {
if config == nil {
pathname := makePathname()
config = tendermint_test.ResetConfig(pathname)
// Shut up the logging
logger.SetLogLevel(tmLogLevel)
// and we use random ports to run in parallel
tm, rpc, grpc := makeAddrs()
config.Set("node_laddr", tm)
config.Set("rpc_laddr", rpc)
config.Set("grpc_laddr", grpc)
}
return config
}
// Sleep forever
ch := make(chan struct{})
<-ch
// GetURIClient gets a uri client pointing to the test tendermint rpc
func GetURIClient() *client.ClientURI {
rpcAddr := GetConfig().GetString("rpc_laddr")
return client.NewClientURI(rpcAddr)
}
//--------------------------------------------------------------------------------
// Utilities for testing the websocket service
// GetJSONClient gets a http/json client pointing to the test tendermint rpc
func GetJSONClient() *client.ClientJSONRPC {
rpcAddr := GetConfig().GetString("rpc_laddr")
return client.NewClientJSONRPC(rpcAddr)
}
// create a new connection
func newWSClient(t *testing.T) *client.WSClient {
wsc := client.NewWSClient(websocketAddr, websocketEndpoint)
func GetGRPCClient() core_grpc.BroadcastAPIClient {
grpcAddr := config.GetString("grpc_laddr")
return core_grpc.StartGRPCClient(grpcAddr)
}
func GetWSClient() *client.WSClient {
rpcAddr := GetConfig().GetString("rpc_laddr")
wsc := client.NewWSClient(rpcAddr, "/websocket")
if _, err := wsc.Start(); err != nil {
panic(err)
}
return wsc
}
// subscribe to an event
func subscribe(t *testing.T, wsc *client.WSClient, eventid string) {
if err := wsc.Subscribe(eventid); err != nil {
panic(err)
}
// StartTendermint starts a test tendermint node in process and returns once it is initialized
func StartTendermint(app abci.Application) *nm.Node {
node := NewTendermint(app)
node.Start()
fmt.Println("Tendermint running!")
return node
}
// unsubscribe from an event
func unsubscribe(t *testing.T, wsc *client.WSClient, eventid string) {
if err := wsc.Unsubscribe(eventid); err != nil {
panic(err)
}
// NewTendermint creates a new tendermint node, but does not start it
func NewTendermint(app abci.Application) *nm.Node {
// Create & start node
config := GetConfig()
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
papp := proxy.NewLocalClientCreator(app)
node := nm.NewNode(config, privValidator, papp)
return node
}
//--------------------------------------------------------------------------------
// Utilities for testing the websocket service
// wait for an event; do things that might trigger events, and check them when they are received
// the check function takes an event id and the byte slice read off the ws
func waitForEvent(t *testing.T, wsc *client.WSClient, eventid string, dieOnTimeout bool, f func(), check func(string, interface{}) error) {
@ -142,7 +162,7 @@ func waitForEvent(t *testing.T, wsc *client.WSClient, eventid string, dieOnTimeo
case <-timeout.C:
if dieOnTimeout {
wsc.Stop()
panic(Fmt("%s event was not received in time", eventid))
require.True(t, false, "%s event was not received in time", eventid)
}
// else that's great, we didn't hear the event
// and we shouldn't have
@ -150,16 +170,13 @@ func waitForEvent(t *testing.T, wsc *client.WSClient, eventid string, dieOnTimeo
if dieOnTimeout {
// message was received and expected
// run the check
if err := check(eventid, eventData); err != nil {
panic(err) // Show the stack trace.
}
require.Nil(t, check(eventid, eventData))
} else {
wsc.Stop()
panic(Fmt("%s event was not expected", eventid))
require.True(t, false, "%s event was not expected", eventid)
}
case err := <-errCh:
panic(err) // Show the stack trace.
}
}


+ 36
- 0
rpc/test/main_test.go View File

@ -0,0 +1,36 @@
/*
Package rpctest contains integration tests and helper functions for testing
the RPC interface
In particular, it allows us to spin up a tendermint node in process, with
a live RPC server, which we can use to verify our rpc calls. It provides
all data structures, enabling us to do more complex tests (like node_test.go)
that introspect the blocks themselves to validate signatures and the like.
It currently only spins up one node; it would be interesting to expand it
to multiple nodes to see the real effects of validating partially signed
blocks.
*/
package rpctest
import (
"os"
"testing"
"github.com/tendermint/abci/example/dummy"
nm "github.com/tendermint/tendermint/node"
)
var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (with the dummy app) in the background to test against
app := dummy.NewDummyApplication()
node = StartTendermint(app)
code := m.Run()
// and shut down properly at the end
node.Stop()
node.Wait()
os.Exit(code)
}

+ 51
- 0
scripts/dist.sh View File

@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -e
# Get the version from the environment, or try to figure it out.
if [ -z "$VERSION" ]; then
VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go)
fi
if [ -z "$VERSION" ]; then
echo "Please specify a version."
exit 1
fi
echo "==> Building version $VERSION..."
# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
# Generate the tag.
if [ -z "$NOTAG" ]; then
echo "==> Tagging..."
git commit --allow-empty -a -m "Release v$VERSION"
git tag -a -m "Version $VERSION" "v${VERSION}" master
fi
# Do a hermetic build inside a Docker container.
docker build -t tendermint/tendermint-builder scripts/tendermint-builder/
docker run --rm -e "BUILD_TAGS=$BUILD_TAGS" -v "$(pwd)":/go/src/github.com/tendermint/tendermint tendermint/tendermint-builder ./scripts/dist_build.sh
# Add "tendermint" and $VERSION prefix to package name.
rm -rf ./build/dist
mkdir -p ./build/dist
for FILENAME in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type f); do
FILENAME=$(basename "$FILENAME")
cp "./build/pkg/${FILENAME}" "./build/dist/tendermint_${VERSION}_${FILENAME}"
done
# Make the checksums.
pushd ./build/dist
shasum -a256 ./* > "./tendermint_${VERSION}_SHA256SUMS"
popd
# Done
echo
echo "==> Results:"
ls -hl ./build/dist
exit 0

+ 54
- 0
scripts/dist_build.sh View File

@ -0,0 +1,54 @@
#!/usr/bin/env bash
set -e
# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
# Get the git commit
GIT_COMMIT="$(git rev-parse --short HEAD)"
GIT_DESCRIBE="$(git describe --tags --always)"
GIT_IMPORT="github.com/tendermint/tendermint/version"
# Determine the arch/os combos we're building for
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
# Delete the old dir
echo "==> Removing old directory..."
rm -rf build/pkg
mkdir -p build/pkg
# Make sure build tools are available.
make tools
# Get VENDORED dependencies
make get_vendor_deps
# Build!
echo "==> Building..."
"$(which gox)" \
-os="${XC_OS}" \
-arch="${XC_ARCH}" \
-osarch="!darwin/arm !solaris/amd64 !freebsd/amd64" \
-ldflags "-X ${GIT_IMPORT}.GitCommit='${GIT_COMMIT}' -X ${GIT_IMPORT}.GitDescribe='${GIT_DESCRIBE}'" \
-output "build/pkg/{{.OS}}_{{.Arch}}/tendermint" \
-tags="${BUILD_TAGS}" \
github.com/tendermint/tendermint/cmd/tendermint
# Zip all the files.
echo "==> Packaging..."
for PLATFORM in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type d); do
OSARCH=$(basename "${PLATFORM}")
echo "--> ${OSARCH}"
pushd "$PLATFORM" >/dev/null 2>&1
zip "../${OSARCH}.zip" ./*
popd >/dev/null 2>&1
done
exit 0

+ 12
- 0
scripts/tendermint-builder/Dockerfile View File

@ -0,0 +1,12 @@
FROM golang:1.7.4
RUN apt-get update && apt-get install -y --no-install-recommends \
zip \
&& rm -rf /var/lib/apt/lists/*
# We want to ensure that release builds never have any cgo dependencies so we
# switch that off at the highest level.
ENV CGO_ENABLED 0
RUN mkdir -p $GOPATH/src/github.com/tendermint/tendermint
WORKDIR $GOPATH/src/github.com/tendermint/tendermint

+ 16
- 16
state/errors.go View File

@ -9,47 +9,47 @@ type (
ErrProxyAppConn error
ErrUnknownBlock struct {
height int
Height int
}
ErrBlockHashMismatch struct {
coreHash []byte
appHash []byte
height int
CoreHash []byte
AppHash []byte
Height int
}
ErrAppBlockHeightTooHigh struct {
coreHeight int
appHeight int
CoreHeight int
AppHeight int
}
ErrLastStateMismatch struct {
height int
core []byte
app []byte
Height int
Core []byte
App []byte
}
ErrStateMismatch struct {
got *State
expected *State
Got *State
Expected *State
}
)
func (e ErrUnknownBlock) Error() string {
return Fmt("Could not find block #%d", e.height)
return Fmt("Could not find block #%d", e.Height)
}
func (e ErrBlockHashMismatch) Error() string {
return Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.appHash, e.coreHash, e.height)
return Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height)
}
func (e ErrAppBlockHeightTooHigh) Error() string {
return Fmt("App block height (%d) is higher than core (%d)", e.appHeight, e.coreHeight)
return Fmt("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight)
}
func (e ErrLastStateMismatch) Error() string {
return Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.height, e.core, e.app)
return Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App)
}
func (e ErrStateMismatch) Error() string {
return Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.got, e.expected)
return Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected)
}

+ 19
- 166
state/execution.go View File

@ -1,17 +1,15 @@
package state
import (
"bytes"
"errors"
"github.com/ebuchman/fail-test"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
//--------------------------------------------------
@ -54,10 +52,6 @@ func (s *State) ExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnC
nextValSet.IncrementAccum(1)
s.SetBlockAndValidators(block.Header, blockPartsHeader, valSet, nextValSet)
// save state with updated height/blockhash/validators
// but stale apphash, in case we fail between Commit and Save
s.SaveIntermediate()
fail.Fail() // XXX
return nil
@ -229,7 +223,7 @@ func (s *State) validateBlock(block *types.Block) error {
// Execute and commit block against app, save block and state
func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus,
block *types.Block, partsHeader types.PartSetHeader, mempool Mempool) error {
block *types.Block, partsHeader types.PartSetHeader, mempool types.Mempool) error {
// Run the block on the State:
// + update validator sets
@ -250,7 +244,7 @@ func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConn
// mempool must be locked during commit and update
// because state is typically reset on Commit and old txs must be replayed
// against committed state before new txs are run in the mempool, lest they be invalid
func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, block *types.Block, mempool Mempool) error {
func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, block *types.Block, mempool types.Mempool) error {
mempool.Lock()
defer mempool.Unlock()
@ -264,6 +258,7 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
log.Debug("Commit.Log: " + res.Log)
}
log.Info("Committed state", "hash", res.Data)
// Set the state's new AppHash
s.AppHash = res.Data
@ -273,165 +268,23 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
return nil
}
// Updates to the mempool need to be synchronized with committing a block
// so apps can reset their transient state on Commit
type Mempool interface {
Lock()
Unlock()
Update(height int, txs []types.Tx)
}
type MockMempool struct {
}
func (m MockMempool) Lock() {}
func (m MockMempool) Unlock() {}
func (m MockMempool) Update(height int, txs []types.Tx) {}
//----------------------------------------------------------------
// Handshake with app to sync to latest state of core by replaying blocks
// TODO: Should we move blockchain/store.go to its own package?
type BlockStore interface {
Height() int
LoadBlock(height int) *types.Block
LoadBlockMeta(height int) *types.BlockMeta
}
type Handshaker struct {
config cfg.Config
state *State
store BlockStore
nBlocks int // number of blocks applied to the state
}
func NewHandshaker(config cfg.Config, state *State, store BlockStore) *Handshaker {
return &Handshaker{config, state, store, 0}
}
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
// Apply and commit a block, but without all the state validation.
// Returns the application root hash (result of abci.Commit)
func ApplyBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block) ([]byte, error) {
var eventCache types.Fireable // nil
_, err := execBlockOnProxyApp(eventCache, appConnConsensus, block)
if err != nil {
return errors.New(Fmt("Error calling Info: %v", err))
log.Warn("Error executing block on proxy app", "height", block.Height, "err", err)
return nil, err
}
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
appHash := res.LastBlockAppHash
log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)
// TODO: check version
// replay blocks up to the latest in the blockstore
err = h.ReplayBlocks(appHash, blockHeight, proxyApp.Consensus())
if err != nil {
return errors.New(Fmt("Error on replay: %v", err))
// Commit block, get hash back
res := appConnConsensus.CommitSync()
if res.IsErr() {
log.Warn("Error in proxyAppConn.CommitSync", "error", res)
return nil, res
}
// Save the state
h.state.Save()
// TODO: (on restart) replay mempool
return nil
}
// Replay all blocks after blockHeight and ensure the result matches the current state.
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnConsensus proxy.AppConnConsensus) error {
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
if storeBlockHeight == 0 {
return nil
} else if storeBlockHeight < appBlockHeight {
// if the app is ahead, there's nothing we can do
return ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
} else if storeBlockHeight == appBlockHeight {
// We ran Commit, but if we crashed before state.Save(),
// load the intermediate state and update the state.AppHash.
// NOTE: If ABCI allowed rollbacks, we could just replay the
// block even though it's been committed
stateAppHash := h.state.AppHash
lastBlockAppHash := h.store.LoadBlock(storeBlockHeight).AppHash
if bytes.Equal(stateAppHash, appHash) {
// we're all synced up
log.Debug("ABCI RelpayBlocks: Already synced")
} else if bytes.Equal(stateAppHash, lastBlockAppHash) {
// we crashed after commit and before saving state,
// so load the intermediate state and update the hash
h.state.LoadIntermediate()
h.state.AppHash = appHash
log.Debug("ABCI RelpayBlocks: Loaded intermediate state and updated state.AppHash")
} else {
PanicSanity(Fmt("Unexpected state.AppHash: state.AppHash %X; app.AppHash %X, lastBlock.AppHash %X", stateAppHash, appHash, lastBlockAppHash))
}
return nil
} else if storeBlockHeight == appBlockHeight+1 &&
storeBlockHeight == stateBlockHeight+1 {
// We crashed after saving the block
// but before Commit (both the state and app are behind),
// so just replay the block
// check that the lastBlock.AppHash matches the state apphash
block := h.store.LoadBlock(storeBlockHeight)
if !bytes.Equal(block.Header.AppHash, appHash) {
return ErrLastStateMismatch{storeBlockHeight, block.Header.AppHash, appHash}
}
blockMeta := h.store.LoadBlockMeta(storeBlockHeight)
h.nBlocks += 1
var eventCache types.Fireable // nil
// replay the latest block
return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, MockMempool{})
} else if storeBlockHeight != stateBlockHeight {
// unless we failed before committing or saving state (previous 2 case),
// the store and state should be at the same height!
PanicSanity(Fmt("Expected storeHeight (%d) and stateHeight (%d) to match.", storeBlockHeight, stateBlockHeight))
} else {
// store is more than one ahead,
// so app wants to replay many blocks
// replay all blocks starting with appBlockHeight+1
var eventCache types.Fireable // nil
// TODO: use stateBlockHeight instead and let the consensus state
// do the replay
var appHash []byte
for i := appBlockHeight + 1; i <= storeBlockHeight; i++ {
h.nBlocks += 1
block := h.store.LoadBlock(i)
_, err := execBlockOnProxyApp(eventCache, appConnConsensus, block)
if err != nil {
log.Warn("Error executing block on proxy app", "height", i, "err", err)
return err
}
// Commit block, get hash back
res := appConnConsensus.CommitSync()
if res.IsErr() {
log.Warn("Error in proxyAppConn.CommitSync", "error", res)
return res
}
if res.Log != "" {
log.Info("Commit.Log: " + res.Log)
}
appHash = res.Data
}
if !bytes.Equal(h.state.AppHash, appHash) {
return errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash))
}
return nil
if res.Log != "" {
log.Info("Commit.Log: " + res.Log)
}
return nil
return res.Data, nil
}

+ 0
- 210
state/execution_test.go View File

@ -1,210 +0,0 @@
package state
import (
"bytes"
"fmt"
"path"
"testing"
"github.com/tendermint/tendermint/config/tendermint_test"
// . "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
)
var (
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("handshake_test"))
chainID = "handshake_chain"
nBlocks = 5
mempool = MockMempool{}
testPartSize = 65536
)
//---------------------------------------
// Test block execution
func TestExecBlock(t *testing.T) {
// TODO
}
//---------------------------------------
// Test handshake/replay
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
testHandshakeReplay(t, 0)
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
testHandshakeReplay(t, 1)
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
testHandshakeReplay(t, nBlocks-1)
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
testHandshakeReplay(t, nBlocks)
}
// Make some blocks. Start a fresh app and apply n blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, n int) {
config := tendermint_test.ResetConfig("proxy_test_")
state, store := stateAndStore(config)
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
clientCreator2 := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2")))
proxyApp := proxy.NewAppConns(config, clientCreator, NewHandshaker(config, state, store))
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
chain := makeBlockchain(t, proxyApp, state)
store.chain = chain //
latestAppHash := state.AppHash
proxyApp.Stop()
if n > 0 {
// start a new app without handshake, play n blocks
proxyApp = proxy.NewAppConns(config, clientCreator2, nil)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
state2, _ := stateAndStore(config)
for i := 0; i < n; i++ {
block := chain[i]
err := state2.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(err)
}
}
proxyApp.Stop()
}
// now start it with the handshake
handshaker := NewHandshaker(config, state, store)
proxyApp = proxy.NewAppConns(config, clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync()
if err != nil {
t.Fatal(err)
}
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
}
if handshaker.nBlocks != nBlocks-n {
t.Fatalf("Expected handshake to sync %d blocks, got %d", nBlocks-n, handshaker.nBlocks)
}
}
//--------------------------
// utils for making blocks
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privKey.PubKey().Address(),
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privKey.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, proxyApp proxy.AppConns, state *State) (blockchain []*types.Block) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(prevBlockID)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
blockchain = append(blockchain, block)
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
}
return blockchain
}
// fresh state and mock store
func stateAndStore(config cfg.Config) (*State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
return MakeGenesisState(stateDB, &types.GenesisDoc{
ChainID: chainID,
Validators: []types.GenesisValidator{
types.GenesisValidator{privKey.PubKey(), 10000, "test"},
},
AppHash: nil,
}), NewMockBlockStore(config, nil)
}
//----------------------------------
// mock block store
type mockBlockStore struct {
config cfg.Config
chain []*types.Block
}
func NewMockBlockStore(config cfg.Config, chain []*types.Block) *mockBlockStore {
return &mockBlockStore{config, chain}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
Hash: block.Hash(),
Header: block.Header,
PartsHeader: block.MakePartSet(bs.config.GetInt("block_part_size")).Header(),
}
}

+ 5
- 32
state/state.go View File

@ -14,8 +14,7 @@ import (
)
var (
stateKey = []byte("stateKey")
stateIntermediateKey = []byte("stateIntermediateKey")
stateKey = []byte("stateKey")
)
//-----------------------------------------------------------------------------
@ -82,35 +81,6 @@ func (s *State) Save() {
s.db.SetSync(stateKey, s.Bytes())
}
func (s *State) SaveIntermediate() {
s.mtx.Lock()
defer s.mtx.Unlock()
s.db.SetSync(stateIntermediateKey, s.Bytes())
}
// Load the intermediate state into the current state
// and do some sanity checks
func (s *State) LoadIntermediate() {
s2 := loadState(s.db, stateIntermediateKey)
if s.ChainID != s2.ChainID {
PanicSanity(Fmt("State mismatch for ChainID. Got %v, Expected %v", s2.ChainID, s.ChainID))
}
if s.LastBlockHeight+1 != s2.LastBlockHeight {
PanicSanity(Fmt("State mismatch for LastBlockHeight. Got %v, Expected %v", s2.LastBlockHeight, s.LastBlockHeight+1))
}
if !bytes.Equal(s.Validators.Hash(), s2.LastValidators.Hash()) {
PanicSanity(Fmt("State mismatch for LastValidators. Got %X, Expected %X", s2.LastValidators.Hash(), s.Validators.Hash()))
}
if !bytes.Equal(s.AppHash, s2.AppHash) {
PanicSanity(Fmt("State mismatch for AppHash. Got %X, Expected %X", s2.AppHash, s.AppHash))
}
s.setBlockAndValidators(s2.LastBlockHeight, s2.LastBlockID, s2.LastBlockTime, s2.Validators.Copy(), s2.LastValidators.Copy())
}
func (s *State) Equals(s2 *State) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
@ -166,7 +136,10 @@ func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State {
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc := types.GenesisDocFromJSON(genDocJSON)
genDoc, err := types.GenesisDocFromJSON(genDocJSON)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
}
return MakeGenesisState(db, genDoc)
}


+ 6
- 4
test/README.md View File

@ -3,26 +3,28 @@
The unit tests (ie. the `go test`s) can be run with `make test`.
The integration tests can be run with `make test_integrations`.
Running the integrations test will build a docker container with latest tendermint
Running the integration tests will build a docker container with the local version of tendermint
and run the following tests in docker containers:
- go tests, with --race
- includes test coverage
- app tests
- dummy app over socket
- counter app over socket
- counter app over grpc
- persistence tests
- crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app
- p2p tests
- start a local dummy app testnet on a docker network (requires docker version 1.10+)
- send a tx on each node and ensure the state root is updated on all of them
- crash and restart nodes one at a time and ensure they can sync back up (via fastsync)
- crash and restart all nodes at once and ensure they can sync back up
If on a `release-x.x.x` branch, we also run
- `go test` for all our dependency libs (test/test_libs.sh)
- network_testing - benchmark a mintnet based cloud deploy using netmon
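For quick reference, both suites are driven from the Makefile, so a typical local run is just:

```
make test                # unit tests
make test_integrations   # docker-based integration tests
```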
# Coverage
TODO!

+ 13
- 13
test/app/dummy_test.sh View File

@ -30,9 +30,9 @@ echo "... testing query with abci-cli"
RESPONSE=`abci-cli query \"$KEY\"`
set +e
A=`echo $RESPONSE | grep '"exists":true'`
A=`echo $RESPONSE | grep "$VALUE"`
if [[ $? != 0 ]]; then
echo "Failed to find 'exists=true' for $KEY. Response:"
echo "Failed to find $VALUE for $KEY. Response:"
echo "$RESPONSE"
exit 1
fi
@ -41,9 +41,9 @@ set -e
# we should not be able to look up the value
RESPONSE=`abci-cli query \"$VALUE\"`
set +e
A=`echo $RESPONSE | grep '"exists":true'`
A=`echo $RESPONSE | grep $VALUE`
if [[ $? == 0 ]]; then
echo "Found 'exists=true' for $VALUE when we should not have. Response:"
echo "Found '$VALUE' for $VALUE when we should not have. Response:"
echo "$RESPONSE"
exit 1
fi
@ -53,28 +53,28 @@ set -e
# test using the /abci_query
#############################
echo "... testing query with /abci_query"
echo "... testing query with /abci_query 2"
# we should be able to look up the key
RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=$(toHex $KEY)`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`
RESPONSE=`curl -s "127.0.0.1:46657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"`
RESPONSE=`echo $RESPONSE | jq .result[1].response.log`
set +e
A=`echo $RESPONSE | grep '"exists":true'`
A=`echo $RESPONSE | grep 'exists'`
if [[ $? != 0 ]]; then
echo "Failed to find 'exists=true' for $KEY. Response:"
echo "Failed to find 'exists' for $KEY. Response:"
echo "$RESPONSE"
exit 1
fi
set -e
# we should not be able to look up the value
RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=\"$(toHex $VALUE)\"`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`
RESPONSE=`curl -s "127.0.0.1:46657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"`
RESPONSE=`echo $RESPONSE | jq .result[1].response.log`
set +e
A=`echo $RESPONSE | grep '"exists":true'`
A=`echo $RESPONSE | grep 'exists'`
if [[ $? == 0 ]]; then
echo "Found 'exists=true' for $VALUE when we should not have. Response:"
echo "Found 'exists' for $VALUE when we should not have. Response:"
echo "$RESPONSE"
exit 1
fi
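The switch above also documents the new query URL shape: `/abci_query` now takes explicit `path`, `data` and `prove` parameters instead of a single `query` argument. As a standalone sketch of the same lookup (using `xxd` for hex-encoding in place of the test script's `toHex` helper, which is an assumed equivalent):

```
KEY="mykey"
DATA=$(echo -n "$KEY" | xxd -p)   # hex-encode the key
# the response log is expected to mention "exists" when the key is present
curl -s "127.0.0.1:46657/abci_query?path=\"\"&data=$DATA&prove=false" | jq .result[1].response.log
```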


+ 13
- 5
test/docker/Dockerfile View File

@ -1,25 +1,33 @@
# Pull base image.
FROM golang:1.6
FROM golang:1.7.4
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc
jq bsdmainutils vim-common psmisc netcat curl
# Setup tendermint repo with vendored dependencies
# but without code - docker caching prevents reinstall on code change!
# Setup tendermint repo
ENV REPO $GOPATH/src/github.com/tendermint/tendermint
WORKDIR $REPO
# Install the vendored dependencies before copying code
# docker caching prevents reinstall on code change!
ADD glide.yaml glide.yaml
ADD glide.lock glide.lock
ADD Makefile Makefile
RUN make get_vendor_deps
# Install the apps
ADD scripts scripts
RUN bash scripts/install_abci_apps.sh
# Now copy in the code
COPY . $REPO
RUN go install ./cmd/tendermint
RUN bash scripts/install_abci_apps.sh
# expose the volume for debugging
VOLUME $REPO
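This is the image the p2p and persistence test scripts below expect to run against; `test/p2p/README.md` builds it with:

```
docker build -t tendermint_tester -f ./test/docker/Dockerfile .
```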


+ 3
- 8
test/net/start.sh View File

@ -17,18 +17,13 @@ if [[ "$MACH_PREFIX" == "" ]]; then
fi
set -u
export TMHEAD=`git rev-parse --abbrev-ref HEAD`
export TM_IMAGE="tendermint/tmbase"
cd $GOPATH/src/github.com/tendermint/network_testing
cd "$GOPATH/src/github.com/tendermint/network_testing"
echo "... running network test $(pwd)"
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER
TMHEAD=$(git rev-parse --abbrev-ref HEAD) TM_IMAGE="tendermint/tendermint" bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER
# TODO: publish result!
# cleanup
echo "... destroying machines"
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]

+ 54
- 0
test/p2p/README.md View File

@ -0,0 +1,54 @@
# Tendermint P2P Tests
These scripts facilitate setting up and testing a local testnet using docker containers.
Set up your own local testnet as follows.
For consistency, we assume all commands are run from the Tendermint repository root (ie. $GOPATH/src/github.com/tendermint/tendermint).
First, build the docker image:
```
docker build -t tendermint_tester -f ./test/docker/Dockerfile .
```
Now create the docker network:
```
docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet
```
This gives us a new network with IP addresses in the range `172.57.0.0 - 172.57.255.255`.
Peers on the network can have any IP address in this range.
For our four node network, let's pick `172.57.0.101 - 172.57.0.104`.
Since we use Tendermint's default listening port of 46656, our list of seed nodes will look like:
```
172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656
```
Now we can start up the peers. We already have config files setup in `test/p2p/data/`.
Let's use a for-loop to start our peers:
```
for i in $(seq 1 4); do
docker run -d \
--net=my_testnet\
--ip="172.57.0.$((100 + $i))" \
--name local_testnet_$i \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \
tendermint_tester node --seeds 172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656 --proxy_app=dummy
done
```
If you now run `docker ps`, you'll see your containers!
We can confirm they are making blocks by checking `/status`, using `curl` and `jq` to pretty-print the output JSON:
```
curl 172.57.0.101:46657/status | jq .
```

+ 4
- 4
test/p2p/client.sh View File

@ -11,9 +11,9 @@ NAME=test_container_$ID
echo "starting test client container with CMD=$CMD"
# run the test container on the local network
docker run -t --rm \
-v $GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p \
--net=$NETWORK_NAME \
-v "$GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p" \
--net="$NETWORK_NAME" \
--ip=$(test/p2p/ip.sh "-1") \
--name $NAME \
--name "$NAME" \
--entrypoint bash \
$DOCKER_IMAGE $CMD
"$DOCKER_IMAGE" $CMD

+ 1
- 1
test/p2p/fast_sync/test_peer.sh View File

@ -27,7 +27,7 @@ SEEDS="$(test/p2p/ip.sh 1):46656"
for j in `seq 2 $N`; do
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
done
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP $SEEDS
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP "--seeds $SEEDS --pex"
# wait for peer to sync and check the app hash
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID"


+ 1
- 0
test/p2p/kill_all/check_peers.sh View File

@ -40,6 +40,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do
((attempt++))
if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then
echo "$attempt unsuccessful attempts were made to catch up"
curl -s "$addr/dump_consensus_state" | jq .result[1]
exit 1
fi


+ 12
- 10
test/p2p/local_testnet_start.sh View File

@ -6,17 +6,19 @@ NETWORK_NAME=$2
N=$3
APP_PROXY=$4
cd $GOPATH/src/github.com/tendermint/tendermint
set +u
SEEDS=$5
if [[ "$SEEDS" != "" ]]; then
echo "Seeds: $SEEDS"
SEEDS="--seeds $SEEDS"
fi
set -u
# create docker network
docker network create --driver bridge --subnet 172.57.0.0/16 $NETWORK_NAME
cd "$GOPATH/src/github.com/tendermint/tendermint"
seeds="$(test/p2p/ip.sh 1):46656"
for i in `seq 2 $N`; do
seeds="$seeds,$(test/p2p/ip.sh $i):46656"
done
echo "Seeds: $seeds"
# create docker network
docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME"
for i in `seq 1 $N`; do
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $APP_PROXY $seeds
for i in $(seq 1 "$N"); do
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$SEEDS --pex"
done

+ 4
- 4
test/p2p/local_testnet_stop.sh View File

@ -4,9 +4,9 @@ set -u
NETWORK_NAME=$1
N=$2
for i in `seq 1 $N`; do
docker stop local_testnet_$i
docker rm -vf local_testnet_$i
for i in $(seq 1 "$N"); do
docker stop "local_testnet_$i"
docker rm -vf "local_testnet_$i"
done
docker network rm $NETWORK_NAME
docker network rm "$NETWORK_NAME"

+ 26
- 11
test/p2p/peer.sh View File

@ -7,18 +7,33 @@ ID=$3
APP_PROXY=$4
set +u
SEEDS=$5
NODE_FLAGS=$5
set -u
if [[ "$SEEDS" != "" ]]; then
SEEDS=" --seeds $SEEDS "
fi
set +eu
echo "starting tendermint peer ID=$ID"
# start tendermint container on the network
docker run -d \
--net=$NETWORK_NAME \
--ip=$(test/p2p/ip.sh $ID) \
--name local_testnet_$ID \
--entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core \
$DOCKER_IMAGE node $SEEDS --proxy_app=$APP_PROXY
if [[ "$CIRCLECI" == true ]]; then
set -u
docker run -d \
--net="$NETWORK_NAME" \
--ip=$(test/p2p/ip.sh $ID) \
--name "local_testnet_$ID" \
--entrypoint tendermint \
-e TMROOT="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
--log-driver=syslog \
--log-opt syslog-address=udp://127.0.0.1:5514 \
--log-opt syslog-facility=daemon \
--log-opt tag="{{.Name}}" \
"$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY"
else
set -u
docker run -d \
--net="$NETWORK_NAME" \
--ip=$(test/p2p/ip.sh $ID) \
--name "local_testnet_$ID" \
--entrypoint tendermint \
-e TMROOT="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
"$DOCKER_IMAGE" node $NODE_FLAGS --log_level=info --proxy_app="$APP_PROXY"
fi

+ 17
- 0
test/p2p/pex/check_peer.sh View File

@ -0,0 +1,17 @@
#! /bin/bash
set -u
ID=$1
N=$2
addr=$(test/p2p/ip.sh "$ID"):46657
echo "2. wait until peer $ID connects to other nodes using pex reactor"
peers_count="0"
while [[ "$peers_count" -lt "$((N-1))" ]]; do
sleep 1
peers_count=$(curl -s "$addr/net_info" | jq ".result[1].peers | length")
echo "... peers count = $peers_count, expected = $((N-1))"
done
echo "... successful"

+ 31
- 0
test/p2p/pex/dial_seeds.sh View File

@ -0,0 +1,31 @@
#! /bin/bash
set -u
N=$1
cd $GOPATH/src/github.com/tendermint/tendermint
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
addr=$(test/p2p/ip.sh $i):46657
curl -s $addr/status > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
ERR=$?
done
echo "... node $i is up"
done
set -e
# seeds need quotes
seeds="\"$(test/p2p/ip.sh 1):46656\""
for i in `seq 2 $N`; do
seeds="$seeds,\"$(test/p2p/ip.sh $i):46656\""
done
echo $seeds
IP=$(test/p2p/ip.sh 1)
curl --data-urlencode "seeds=[$seeds]" "$IP:46657/dial_seeds"

+ 15
- 0
test/p2p/pex/test.sh View File

@ -0,0 +1,15 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
PROXY_APP=$4
cd $GOPATH/src/github.com/tendermint/tendermint
echo "Test reconnecting from the address book"
bash test/p2p/pex/test_addrbook.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
echo "Test connecting via /dial_seeds"
bash test/p2p/pex/test_dial_seeds.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP

+ 57
- 0
test/p2p/pex/test_addrbook.sh View File

@ -0,0 +1,57 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
PROXY_APP=$4
ID=1
echo "----------------------------------------------------------------------"
echo "Testing pex creates the addrbook and uses it if seeds are not provided"
echo "(assuming peers are started with pex enabled)"
CLIENT_NAME="pex_addrbook_$ID"
echo "1. restart peer $ID"
docker stop "local_testnet_$ID"
# preserve addrbook.json
docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/addrbook.json" "/tmp/addrbook.json"
set +e #CIRCLE
docker rm -vf "local_testnet_$ID"
set -e
# NOTE that we do not provide seeds
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--pex"
docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/addrbook.json"
echo "with the following addrbook:"
cat /tmp/addrbook.json
# exec doesn't work on circle
# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/addrbook.json"
echo ""
# if the client runs forever, it means addrbook wasn't saved or was empty
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N"
echo "----------------------------------------------------------------------"
echo "Testing other peers connect to us if we have neither seeds nor the addrbook"
echo "(assuming peers are started with pex enabled)"
CLIENT_NAME="pex_no_addrbook_$ID"
echo "1. restart peer $ID"
docker stop "local_testnet_$ID"
set +e #CIRCLE
docker rm -vf "local_testnet_$ID"
set -e
# NOTE that we do not provide seeds
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--pex"
# if the client runs forever, it means other peers have removed us from their books (which should not happen)
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N"
echo ""
echo "PASS"
echo ""

+ 36
- 0
test/p2p/pex/test_dial_seeds.sh View File

@ -0,0 +1,36 @@
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
PROXY_APP=$4
ID=1
cd $GOPATH/src/github.com/tendermint/tendermint
echo "----------------------------------------------------------------------"
echo "Testing full network connection using one /dial_seeds call"
echo "(assuming peers are started with pex enabled)"
# stop the existing testnet and remove local network
set +e
bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N
set -e
# start the testnet on a local network
# NOTE we re-use the same network for all tests
SEEDS=""
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP $SEEDS
# dial seeds from one node
CLIENT_NAME="dial_seeds"
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/pex/dial_seeds.sh $N"
# test basic connectivity and consensus
# start client container and check the num peers and height for all nodes
CLIENT_NAME="dial_seeds_basic"
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/basic/test.sh $N"

+ 12
- 0
test/p2p/seeds.sh View File

@ -0,0 +1,12 @@
#! /bin/bash
set -eu
N=$1
cd "$GOPATH/src/github.com/tendermint/tendermint"
seeds="$(test/p2p/ip.sh 1):46656"
for i in $(seq 2 $N); do
seeds="$seeds,$(test/p2p/ip.sh $i):46656"
done
echo "$seeds"
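This just prints a comma-separated seed list to stdout; `test/p2p/test.sh` captures it and passes it through to the testnet start script:

```
SEEDS=$(bash test/p2p/seeds.sh 4)
bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" 4 "$PROXY_APP" "$SEEDS"
```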

+ 13
- 8
test/p2p/test.sh View File

@ -6,28 +6,33 @@ NETWORK_NAME=local_testnet
N=4
PROXY_APP=persistent_dummy
cd $GOPATH/src/github.com/tendermint/tendermint
cd "$GOPATH/src/github.com/tendermint/tendermint"
# stop the existing testnet and remove local network
set +e
bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N
bash test/p2p/local_testnet_stop.sh "$NETWORK_NAME" "$N"
set -e
SEEDS=$(bash test/p2p/seeds.sh $N)
# start the testnet on a local network
# NOTE we re-use the same network for all tests
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" "$SEEDS"
# test basic connectivity and consensus
# start client container and check the num peers and height for all nodes
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME basic "test/p2p/basic/test.sh $N"
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" basic "test/p2p/basic/test.sh $N"
# test atomic broadcast:
# start client container and test sending a tx to each node
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME ab "test/p2p/atomic_broadcast/test.sh $N"
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" ab "test/p2p/atomic_broadcast/test.sh $N"
# test fast sync (from current state of network):
# for each node, kill it and re-add it via fast sync
bash test/p2p/fast_sync/test.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
bash test/p2p/fast_sync/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP"
# test killing all peers 3 times
bash test/p2p/kill_all/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" 3
# test killing all peers
bash test/p2p/kill_all/test.sh $DOCKER_IMAGE $NETWORK_NAME $N 3
# test pex
bash test/p2p/pex/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP"

+ 1
- 1
test/persist/test.sh View File

@ -1,5 +1,5 @@
#! /bin/bash
cd $GOPATH/src/github.com/tendermint/tendermint
cd "$GOPATH/src/github.com/tendermint/tendermint"
bash ./test/persist/test_failure_indices.sh

+ 59
- 40
test/persist/test_failure_indices.sh View File

@ -1,98 +1,117 @@
#! /bin/bash
export TMROOT=$HOME/.tendermint_persist
rm -rf $TMROOT
rm -rf "$TMROOT"
tendermint init
# use a unix socket so we can remove it
RPC_ADDR="$(pwd)/rpc.sock"
TM_CMD="tendermint node --log_level=debug --rpc_laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log"
DUMMY_CMD="dummy --persist $TMROOT/dummy" # &> dummy_${name}.log"
function start_procs(){
name=$1
indexToFail=$2
echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
if [[ "$CIRCLECI" == true ]]; then
$DUMMY_CMD &
else
$DUMMY_CMD &> "dummy_${name}.log" &
fi
PID_DUMMY=$!
# before starting tendermint, remove the rpc socket
rm $RPC_ADDR
if [[ "$indexToFail" == "" ]]; then
# run in background, dont fail
tendermint node --log_level=debug &> tendermint_${name}.log &
if [[ "$CIRCLECI" == true ]]; then
$TM_CMD &
else
$TM_CMD &> "tendermint_${name}.log" &
fi
PID_TENDERMINT=$!
else
# run in foreground, fail
FAIL_TEST_INDEX=$indexToFail tendermint node --log_level=debug &> tendermint_${name}.log
if [[ "$CIRCLECI" == true ]]; then
FAIL_TEST_INDEX=$indexToFail $TM_CMD
else
FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log"
fi
PID_TENDERMINT=$!
fi
}
function kill_procs(){
kill -9 $PID_DUMMY $PID_TENDERMINT
wait $PID_DUMMY
wait $PID_TENDERMINT
kill -9 "$PID_DUMMY" "$PID_TENDERMINT"
wait "$PID_DUMMY"
wait "$PID_TENDERMINT"
}
# wait till node is up, send txs
function send_txs(){
addr="127.0.0.1:46657"
curl -s $addr/status > /dev/null
# wait for port to be available
function wait_for_port() {
port=$1
# this will succeed while port is bound
nc -z 127.0.0.1 $port
ERR=$?
while [ "$ERR" != 0 ]; do
i=0
while [ "$ERR" == 0 ]; do
echo "... port $port is still bound. waiting ..."
sleep 1
curl -s $addr/status > /dev/null
nc -z 127.0.0.1 $port
ERR=$?
i=$((i + 1))
if [[ $i == 10 ]]; then
echo "Timed out waiting for port to be released"
exit 1
fi
done
# send a bunch of txs over a few blocks
echo "Node is up, sending txs"
for i in `seq 1 5`; do
for j in `seq 1 100`; do
tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'`
curl -s $addr/broadcast_tx_async?tx=0x$tx &> /dev/null
done
sleep 1
done
echo "... port $port is free!"
}
failsStart=0
fails=`grep -r "fail.Fail" --include \*.go . | wc -l`
failsEnd=$(($fails-1))
fails=$(grep -r "fail.Fail" --include \*.go . | wc -l)
failsEnd=$((fails-1))
for failIndex in `seq $failsStart $failsEnd`; do
for failIndex in $(seq $failsStart $failsEnd); do
echo ""
echo "* Test FailIndex $failIndex"
# test failure at failIndex
send_txs &
start_procs 1 $failIndex
bash ./test/utils/txs.sh "localhost:46657" &
start_procs 1 "$failIndex"
# tendermint should fail when it hits the fail index
kill -9 $PID_DUMMY
wait $PID_DUMMY
# tendermint should already have exited when it hits the fail index
# but kill -9 for good measure
kill_procs
start_procs 2
# wait for node to handshake and make a new block
addr="localhost:46657"
curl -s $addr/status > /dev/null
# NOTE: --unix-socket is only available in curl v7.40+
curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
sleep 1
curl -s $addr/status > /dev/null
curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null
ERR=$?
i=$(($i + 1))
if [[ $i == 10 ]]; then
i=$((i + 1))
if [[ $i == 20 ]]; then
echo "Timed out waiting for tendermint to start"
exit 1
fi
done
# wait for a new block
h1=`curl -s $addr/status | jq .result[1].latest_block_height`
h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result[1].latest_block_height)
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
h2=`curl -s $addr/status | jq .result[1].latest_block_height`
h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result[1].latest_block_height)
done
kill_procs


Some files were not shown because too many files changed in this diff
