diff --git a/.circleci/config.yml b/.circleci/config.yml index 539dd7ee4..babba409d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,136 +1,126 @@ -version: 2 +version: 2.1 -defaults: &defaults - working_directory: /go/src/github.com/tendermint/tendermint - docker: - - image: circleci/golang - environment: - GOBIN: /tmp/workspace/bin - -docs_update_config: &docs_update_config - working_directory: ~/repo - docker: - - image: tendermintdev/jq_curl - environment: - AWS_REGION: us-east-1 +executors: + golang: + docker: + - image: tendermintdev/docker-tendermint-build + working_directory: /go/src/github.com/tendermint/tendermint + environment: + GOBIN: /tmp/bin + release: + machine: true + docs: + docker: + - image: tendermintdev/jq_curl + environment: + AWS_REGION: us-east-1 -release_management_docker: &release_management_docker - machine: true +commands: + run_test: + parameters: + script_path: + type: string + steps: + - attach_workspace: + at: /tmp/bin + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} + - checkout + - restore_cache: + name: "Restore go modules cache" + keys: + - go-mod-v1-{{ checksum "go.sum" }} + - run: + name: "Running test" + command: | + bash << parameters.script_path >> jobs: setup_dependencies: - <<: *defaults + executor: golang steps: - - run: mkdir -p /tmp/workspace/bin - - run: mkdir -p /tmp/workspace/profiles - checkout - restore_cache: + name: "Restore go modules cache" keys: - - v4-pkg-cache + - go-mod-v1-{{ checksum "go.sum" }} - run: - name: tools command: | - export PATH="$GOBIN:$PATH" - make get_tools + mkdir -p /tmp/bin - run: - name: binaries - command: | - export PATH="$GOBIN:$PATH" - make install install_abci - - persist_to_workspace: - root: /tmp/workspace - paths: - - bin - - profiles + name: Cache go modules + command: make go-mod-cache + - run: + name: tools + command: make tools + - run: + name: "Build binaries" + command: make install install_abci - save_cache: - key: v4-pkg-cache + name: "Save go modules cache" + key: go-mod-v1-{{ checksum "go.sum" }} paths: - - /go/pkg + - "/go/pkg/mod" - save_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + name: "Save source code cache" + key: go-src-v1-{{ .Revision }} paths: - - /go/src/github.com/tendermint/tendermint - - build_slate: - <<: *defaults - steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v4-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: - name: slate docs - command: | - set -ex - export PATH="$GOBIN:$PATH" - make build-slate + - ".git" + - persist_to_workspace: + root: "/tmp/bin" + paths: + - "." 
test_abci_apps: - <<: *defaults + executor: golang steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v4-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: - name: Run abci apps tests - command: | - export PATH="$GOBIN:$PATH" - bash abci/tests/test_app/test.sh + - run_test: + script_path: abci/tests/test_app/test.sh # if this test fails, fix it and update the docs at: # https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md test_abci_cli: - <<: *defaults + executor: golang steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v4-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: - name: Run abci-cli tests - command: | - export PATH="$GOBIN:$PATH" - bash abci/tests/test_cli/test.sh + - run_test: + script_path: abci/tests/test_cli/test.sh test_apps: - <<: *defaults + executor: golang steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v4-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils - - run: - name: Run tests - command: bash test/app/test.sh + - run_test: + script_path: test/app/test.sh + + test_persistence: + executor: golang + steps: + - run_test: + script_path: test/persist/test_failure_indices.sh test_cover: - <<: *defaults + executor: golang parallelism: 4 steps: - - attach_workspace: - at: /tmp/workspace - restore_cache: - key: v4-pkg-cache + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} + - checkout - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: mkdir -p /tmp/logs + name: "Restore go module cache" + keys: + - go-mod-v2-{{ checksum "go.sum" }} - run: - name: Run tests + name: "Run tests" command: | + export VERSION="$(git describe --tags --long | sed 's/v\(.*\)/\1/')" + export GO111MODULE=on + mkdir -p /tmp/logs /tmp/workspace/profiles for pkg in $(go list github.com/tendermint/tendermint/... 
| circleci tests split --split-by=timings); do id=$(basename "$pkg") - - GO111MODULE=on go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" + go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" done - persist_to_workspace: root: /tmp/workspace @@ -139,19 +129,6 @@ jobs: - store_artifacts: path: /tmp/logs - test_persistence: - <<: *defaults - steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v4-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: - name: Run tests - command: bash test/persist/test_failure_indices.sh - localnet: working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint machine: @@ -187,19 +164,22 @@ jobs: path: /home/circleci/project/test/p2p/logs upload_coverage: - <<: *defaults + executor: golang steps: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v4-pkg-cache + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} + - checkout - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + name: "Restore go module cache" + keys: + - go-mod-v2-{{ checksum "go.sum" }} - run: name: gather command: | - set -ex - echo "mode: atomic" > coverage.txt for prof in $(ls /tmp/workspace/profiles/); do tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt @@ -209,18 +189,22 @@ jobs: command: bash .circleci/codecov.sh -f coverage.txt deploy_docs: - <<: *docs_update_config + executor: docs steps: + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - run: name: Trigger website build command: | curl --silent \ - --show-error \ - -X POST \ - --header "Content-Type: application/json" \ - -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \ - "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json + --show-error \ + -X POST \ + --header "Content-Type: application/json" \ + -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \ + "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json RESULT=`jq -r '.status' response.json` MESSAGE=`jq -r '.message' response.json` @@ -233,8 +217,12 @@ jobs: fi prepare_build: - <<: *defaults + executor: golang steps: + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - run: name: Get next release number @@ -250,8 +238,7 @@ jobs: echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source - run: name: Build dependencies - command: | - make get_tools + command: make tools - persist_to_workspace: root: . 
paths: @@ -262,11 +249,16 @@ jobs: - "/go/pkg/mod" build_artifacts: - <<: *defaults + executor: golang parallelism: 4 steps: + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - restore_cache: + name: "Restore release dependencies cache" keys: - v2-release-deps-{{ checksum "go.sum" }} - attach_workspace: @@ -287,13 +279,17 @@ jobs: - "tendermint_linux_amd64" release_artifacts: - <<: *defaults + executor: golang steps: + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - attach_workspace: at: /tmp/workspace - run: - name: Deploy to GitHub + name: "Deploy to GitHub" command: | # Setting CIRCLE_TAG because we do not tag the release ourselves. source /tmp/workspace/release-version.source @@ -315,27 +311,36 @@ jobs: python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}" release_docker: - <<: *release_management_docker + machine: + image: ubuntu-1604:201903-01 steps: + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - attach_workspace: at: /tmp/workspace - run: - name: Deploy to Docker Hub + name: "Deploy to Docker Hub" command: | # Setting CIRCLE_TAG because we do not tag the release ourselves. source /tmp/workspace/release-version.source cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER" - docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}" + docker login -u "${DOCKERHUB_USER}" --password-stdin \<<< "${DOCKERHUB_PASS}" docker push "tendermint/tendermint" docker logout reproducible_builds: - <<: *defaults + executor: golang steps: - attach_workspace: at: /tmp/workspace + - restore_cache: + name: "Restore source code cache" + keys: + - go-src-v1-{{ .Revision }} - checkout - setup_remote_docker: docker_layer_caching: true @@ -359,6 +364,35 @@ jobs: - store_artifacts: path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz + # Test RPC implementation against the swagger documented specs + contract_tests: + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout + - run: + name: Test RPC endpoints against swagger documentation + command: | + set -x + export PATH=~/.local/bin:$PATH + + # install node and dredd + ./scripts/get_nodejs.sh + + # build the binaries with a proper version of Go + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks + + # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use + go get github.com/snikch/goodman/cmd/goodman + make contract-tests + workflows: version: 2 test-suite: @@ -397,6 +431,10 @@ workflows: only: - master - /v[0-9]+\.[0-9]+/ + - contract_tests: + requires: + - setup_dependencies + release: jobs: - prepare_build diff --git a/.gitignore b/.gitignore index 10ee3099c..9e2e5a9ea 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,5 @@ terraform.tfstate.backup terraform.tfstate.d .vscode + +profile\.out diff --git a/.golangci.yml b/.golangci.yml index 17d575316..8b7fbd7ec 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ 
-9,14 +9,10 @@ linters: - maligned - errcheck - interfacer - - unconvert - - goconst - unparam - - nakedret - lll - gochecknoglobals - gochecknoinits - - scopelint - stylecheck # linters-settings: # govet: @@ -29,9 +25,6 @@ linters: # suggest-new: true # dupl: # threshold: 100 -# goconst: -# min-len: 2 -# min-occurrences: 2 # depguard: # list-type: blacklist # packages: diff --git a/CHANGELOG.md b/CHANGELOG.md index 398250d33..f835811d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## v0.32.3 + +*August 28, 2019* + +@climber73 wrote the [Writing a Tendermint Core application in Java +(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) +guide. + +Special thanks to external contributors on this release: +@gchaincl, @bluele, @climber73 + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). + +### IMPROVEMENTS: + +- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info) +- [mempool] [\#3877](https://github.com/tendermint/tendermint/pull/3877) Make `max_tx_bytes` configurable instead of `max_msg_bytes` (@bluele) +- [privval] [\#3370](https://github.com/tendermint/tendermint/issues/3370) Refactor and simplify validator/kms connection handling. Please refer to [this comment](https://github.com/tendermint/tendermint/pull/3370#issue-257360971) for details +- [rpc] [\#3880](https://github.com/tendermint/tendermint/issues/3880) Document endpoints with `swagger`, introduce contract tests of implementation against documentation + +### BUG FIXES: + +- [config] [\#3868](https://github.com/tendermint/tendermint/issues/3868) Move misplaced `max_msg_bytes` into mempool section (@bluele) +- [rpc] [\#3910](https://github.com/tendermint/tendermint/pull/3910) Fix DATA RACE in HTTP client (@gchaincl) +- [store] [\#3893](https://github.com/tendermint/tendermint/issues/3893) Fix "Unregistered interface types.Evidence" panic + ## v0.32.2 *July 31, 2019* @@ -17,20 +44,20 @@ program](https://hackerone.com/tendermint). ### FEATURES: +- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md) +- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele) - [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors` option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors). Warning: beware of accidental name clashes. Here is the list of existing reactors: MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE, PEX. 
-- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file - [rpc] [\#3818](https://github.com/tendermint/tendermint/issues/3818) Make `max_body_bytes` and `max_header_bytes` configurable(@bluele) -- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele) -- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md) +- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence ### IMPROVEMENTS: - [abci] [\#3809](https://github.com/tendermint/tendermint/issues/3809) Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov) -- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence - [p2p] [\#3664](https://github.com/tendermint/tendermint/issues/3664) p2p/conn: reuse buffer when write/read from secret connection(@guagualvcha) +- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file - [rpc] [\#3076](https://github.com/tendermint/tendermint/issues/3076) Improve transaction search performance ### BUG FIXES: diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 833fb6cc0..3ab1632e3 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,4 +1,4 @@ -## v0.32.3 +## v0.32.4 \*\* @@ -19,8 +19,10 @@ program](https://hackerone.com/tendermint). ### IMPROVEMENTS: -- [consensus] \#3839 Reduce "Error attempting to add vote" message severity (Error -> Info) +- [rpc] \#2010 Add NewHTTPWithClient and NewJSONRPCClientWithHTTPClient (note these and NewHTTP, NewJSONRPCClient functions panic if remote is invalid) (@gracenoah) +- [rpc] \#3984 Add `MempoolClient` interface to `Client` interface ### BUG FIXES: -- [config] \#3868 move misplaced `max_msg_bytes` into mempool section +- [consensus] \#3908 Wait `timeout_commit` to pass even if `create_empty_blocks` is `false` +- [mempool] \#3968 Fix memory loading error on 32-bit machines (@jon-certik) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 832156bda..1b9ea4409 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards. -Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`. +Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`. 
See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with! Before making a pull request, please open an issue describing the @@ -21,16 +21,16 @@ Please make sure to use `gofmt` before every commit - the easiest way to do this Please note that Go requires code to live under absolute paths, which complicates forking. While my fork lives at `https://github.com/ebuchman/tendermint`, -the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. +the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. Instead, we use `git remote` to add the fork as a new remote for the original repo, -`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there. +`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there. For instance, to create a fork and work on a branch of it, I would: - * Create the fork on github, using the fork button. - * Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) - * `git remote rename origin upstream` - * `git remote add origin git@github.com:ebuchman/basecoin.git` +- Create the fork on github, using the fork button. +- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) +- `git remote rename origin upstream` +- `git remote add origin git@github.com:ebuchman/basecoin.git` Now `origin` refers to my fork and `upstream` refers to the tendermint version. So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there. @@ -38,8 +38,8 @@ Of course, replace `ebuchman` with your git handle. To pull in updates from the origin repo, run - * `git fetch upstream` - * `git rebase upstream/master` (or whatever branch you want) +- `git fetch upstream` +- `git rebase upstream/master` (or whatever branch you want) ## Dependencies @@ -113,7 +113,7 @@ removed from the header in rpc responses as well. ## Branching Model and Release -The main development branch is master. +The main development branch is master. Every release is maintained in a release branch named `vX.Y.Z`. @@ -140,36 +140,35 @@ easy to reference the pull request where a change was introduced. #### Major Release -1. start on `master` +1. start on `master` 2. run integration tests (see `test_integrations` in Makefile) 3. prepare release in a pull request against `master` (to be squash merged): - - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all issues - - run `bash ./scripts/authors.sh` to get a list of authors since the latest - release, and add the github aliases of external contributors to the top of - the changelog. To lookup an alias from an email, try `bash - ./scripts/authors.sh ` - - reset the `CHANGELOG_PENDING.md` - - bump versions + - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` + - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all issues + - run `bash ./scripts/authors.sh` to get a list of authors since the latest + release, and add the github aliases of external contributors to the top of + the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` + - reset the `CHANGELOG_PENDING.md` + - bump versions 4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`) 5. merge back to master (don't squash merge!) 
#### Minor Release -If there were no breaking changes and you need to create a release nonetheless, -the procedure is almost exactly like with a new release above. +If there were no breaking changes and you need to create a release nonetheless, +the procedure is almost exactly like with a new release above. The only difference is that in the end you create a pull request against the existing `X.X` branch. The branch name should match the release number you want to create. -Merging this PR will trigger the next release. -For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag, +Merging this PR will trigger the next release. +For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag, the patch version will be incremented and the created release will be v0.34.1. #### Backport Release 1. start from the existing release branch you want to backport changes to (e.g. v0.30) -Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7) + Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7) 2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed) 3. steps 2 and 3 from [Major Release](#major-release) 4. push changes to release/vX.X.X branch @@ -183,3 +182,16 @@ If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`. + +### RPC Testing + +If you contribute to the RPC endpoints it's important to document your changes in the [Swagger file](./docs/spec/rpc/swagger.yaml) +To test your changes you should install `nodejs` and run: + +```bash +npm i -g dredd +make build-linux build-contract-tests-hooks +make contract-tests +``` + +This command will popup a network and check every endpoint against what has been documented diff --git a/DOCKER/Dockerfile.abci b/DOCKER/Dockerfile.abci index c6ec05f69..52a3d9e0b 100644 --- a/DOCKER/Dockerfile.abci +++ b/DOCKER/Dockerfile.abci @@ -15,7 +15,7 @@ RUN apt-get update && apt-get install -y \ COPY Gopkg.toml /go/src/github.com/tendermint/abci/ COPY Gopkg.lock /go/src/github.com/tendermint/abci/ -RUN make get_tools +RUN make tools # see https://github.com/golang/dep/issues/1312 RUN dep ensure -vendor-only diff --git a/DOCKER/Dockerfile.build_c-amazonlinux b/DOCKER/Dockerfile.build_c-amazonlinux new file mode 100644 index 000000000..64babe3ae --- /dev/null +++ b/DOCKER/Dockerfile.build_c-amazonlinux @@ -0,0 +1,28 @@ +FROM amazonlinux:2 + +RUN yum -y update && \ + yum -y install wget + +RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ + rpm -ivh epel-release-latest-7.noarch.rpm + +RUN yum -y groupinstall "Development Tools" +RUN yum -y install leveldb-devel which + +ENV GOVERSION=1.12.9 + +RUN cd /tmp && \ + wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \ + tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \ + mkdir -p /go/src && \ + mkdir -p /go/bin + +ENV PATH=$PATH:/usr/local/go/bin:/go/bin +ENV GOBIN=/go/bin +ENV GOPATH=/go/src + +RUN mkdir -p /tendermint +WORKDIR /tendermint + +CMD ["/usr/bin/make", "build_c"] + diff --git a/DOCKER/Makefile b/DOCKER/Makefile index 32510ebbb..41fb60ac8 100644 --- a/DOCKER/Makefile +++ b/DOCKER/Makefile @@ -13,4 +13,7 @@ 
build_testing: push_develop: docker push "tendermint/tendermint:develop" +build_amazonlinux_buildimage: + docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux . + .PHONY: build build_develop push push_develop diff --git a/Makefile b/Makefile index f16e62560..6870258f6 100644 --- a/Makefile +++ b/Makefile @@ -7,8 +7,6 @@ GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) OUTPUT?=build/tendermint -export GO111MODULE = on - INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w @@ -16,7 +14,9 @@ BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" all: check build test install -check: check_tools +# The below include contains the tools. +include scripts/devtools/Makefile +include tests.mk ######################################## ### Build Tendermint @@ -28,7 +28,7 @@ build_c: CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/ build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint + CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint install: CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint @@ -71,22 +71,6 @@ install_abci: dist: @BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'" -######################################## -### Tools & dependencies - -check_tools: - @# https://stackoverflow.com/a/25668869 - @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" - -get_tools: - @echo "--> Installing tools" - ./scripts/get_tools.sh - -update_tools: - @echo "--> Updating tools" - ./scripts/get_tools.sh - #For ABCI and libs get_protoc: @# https://github.com/google/protobuf/releases @@ -100,6 +84,16 @@ get_protoc: cd .. && \ rm -rf protobuf-3.6.1 +go-mod-cache: go.sum + @echo "--> Download go modules to local cache" + @go mod download +.PHONY: go-mod-cache + +go.sum: go.mod + @echo "--> Ensure dependencies have not been modified" + @go mod verify + @go mod tidy + draw_deps: @# requires brew install graphviz or apt-get install graphviz go get github.com/RobotsAndPencils/goviz @@ -146,100 +140,6 @@ protoc_grpc: rpc/grpc/types.pb.go protoc_merkle: crypto/merkle/merkle.pb.go -######################################## -### Testing - -## required to be run first by most tests -build_docker_test_image: - docker build -t tester -f ./test/docker/Dockerfile . 
- -### coverage, app, persistence, and libs tests -test_cover: - # run the go unit tests with coverage - bash test/test_cover.sh - -test_apps: - # run the app tests using bash - # requires `abci-cli` and `tendermint` binaries installed - bash test/app/test.sh - -test_abci_apps: - bash abci/tests/test_app/test.sh - -test_abci_cli: - # test the cli against the examples in the tutorial at: - # ./docs/abci-cli.md - # if test fails, update the docs ^ - @ bash abci/tests/test_cli/test.sh - -test_persistence: - # run the persistence tests using bash - # requires `abci-cli` installed - docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh - - # TODO undockerize - # bash test/persist/test_failure_indices.sh - -test_p2p: - docker rm -f rsyslog || true - rm -rf test/logs || true - mkdir test/logs - cd test/ - docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - cd .. - # requires 'tester' the image from above - bash test/p2p/test.sh tester - # the `docker cp` takes a really long time; uncomment for debugging - # - # mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs - -test_integrations: - make build_docker_test_image - make get_tools - make install - make test_cover - make test_apps - make test_abci_apps - make test_abci_cli - make test_libs - make test_persistence - make test_p2p - -test_release: - @go test -tags release $(PACKAGES) - -test100: - @for i in {1..100}; do make test; done - -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' - -### go tests -test: - @echo "--> Running go test" - @go test -p 1 $(PACKAGES) - -test_race: - @echo "--> Running go test --race" - @go test -p 1 -v -race $(PACKAGES) - -# uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks -test_with_deadlock: - make set_with_deadlock - make test - make cleanup_after_test_with_deadlock - -set_with_deadlock: - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.RWMutex/deadlock.RWMutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.Mutex/deadlock.Mutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w - -# cleanes up after you ran test_with_deadlock -cleanup_after_test_with_deadlock: - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.RWMutex/sync.RWMutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.Mutex/sync.Mutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w ######################################## ### Formatting, linting, and vetting @@ -269,12 +169,19 @@ build-docker: ### Local testnet using docker # Build linux binary on other platforms -build-linux: get_tools +build-linux: tools GOOS=linux GOARCH=amd64 $(MAKE) build build-docker-localnode: @cd networks/local && make +# Runs `make build_c` from within an Amazon Linux (v2)-based Docker build +# container in order to build an Amazon Linux-compatible binary. Produces a +# compatible binary at ./build/tendermint +build_c-amazonlinux: + $(MAKE) -C ./DOCKER build_amazonlinux_buildimage + docker run --rm -it -v `pwd`:/tendermint tendermint/tendermint:build_c-amazonlinux + # Run a 4-node testnet locally localnet-start: localnet-stop build-docker-localnode @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --v 4 --o . 
--populate-persistent-peers --starting-ip-address 192.167.10.2; fi @@ -304,11 +211,27 @@ sentry-stop: @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" -# meant for the CI, inspect script & adapt accordingly -build-slate: - bash scripts/slate.sh +# Build hooks for dredd, to skip or add information on some steps +build-contract-tests-hooks: +ifeq ($(OS),Windows_NT) + go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests +else + go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests ./cmd/contract_tests +endif + +# Run a nodejs tool to test endpoints against a localnet +# The command takes care of starting and stopping the network +# prerequisits: build-contract-tests-hooks build-linux +# the two build commands were not added to let this command run from generic containers or machines. +# The binaries should be built beforehand +contract-tests: + dredd # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint +.PHONY: check build build_race build_abci dist install install_abci check_tools tools update_tools draw_deps \ + get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver fmt rpc-docs build-linux localnet-start \ + localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop protoc_grpc protoc_all \ + build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint build-contract-tests-hooks contract-tests \ + build_c-amazonlinux diff --git a/README.md b/README.md index 3ea9d5de4..d7928f668 100644 --- a/README.md +++ b/README.md @@ -74,9 +74,8 @@ and the [contributing guidelines](CONTRIBUTING.md) when submitting code. Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org). To learn more about the structure of the software, watch the [Developer -Sessions](https://www.youtube.com/playlist?list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) -and read some [Architectural -Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). +Sessions](/docs/DEV_SESSIONS.md) and read some [Architectural Decision +Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). Learn more by reading the code and comparing it to the [specification](https://github.com/tendermint/tendermint/tree/develop/docs/spec). 
diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 60c284333..000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,23 +0,0 @@ -# Roadmap - -BREAKING CHANGES: -- Better support for injecting randomness -- Upgrade consensus for more real-time use of evidence - -FEATURES: -- Use the chain as its own CA for nodes and validators -- Tooling to run multiple blockchains/apps, possibly in a single process -- State syncing (without transaction replay) -- Add authentication and rate-limitting to the RPC - -IMPROVEMENTS: -- Improve subtleties around mempool caching and logic -- Consensus optimizations: - - cache block parts for faster agreement after round changes - - propagate block parts rarest first -- Better testing of the consensus state machine (ie. use a DSL) -- Auto compiled serialization/deserialization code instead of go-wire reflection - -BUG FIXES: -- Graceful handling/recovery for apps that have non-determinism or fail to halt -- Graceful handling/recovery for violations of safety, or liveness diff --git a/Vagrantfile b/Vagrantfile index 67de74297..3367a908d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -57,6 +57,6 @@ Vagrant.configure("2") do |config| # get all deps and tools, ready to install/test su - vagrant -c 'source /home/vagrant/.bash_profile' - su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools' + su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools' SHELL end diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 80b07ff5a..80e60fdec 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -18,6 +18,11 @@ import ( "github.com/tendermint/tendermint/abci/types" ) +const ( + testKey = "abc" + testValue = "def" +) + func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) @@ -46,12 +51,12 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri func TestKVStoreKV(t *testing.T) { kvstore := NewKVStoreApplication() - key := "abc" + key := testKey value := key tx := []byte(key) testKVStore(t, kvstore, tx, key, value) - value = "def" + value = testValue tx = []byte(key + "=" + value) testKVStore(t, kvstore, tx, key, value) } @@ -62,12 +67,12 @@ func TestPersistentKVStoreKV(t *testing.T) { t.Fatal(err) } kvstore := NewPersistentKVStoreApplication(dir) - key := "abc" + key := testKey value := key tx := []byte(key) testKVStore(t, kvstore, tx, key, value) - value = "def" + value = testValue tx = []byte(key + "=" + value) testKVStore(t, kvstore, tx, key, value) } @@ -90,7 +95,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { height = int64(1) hash := []byte("foo") header := types.Header{ - Height: int64(height), + Height: height, } kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) @@ -272,12 +277,12 @@ func TestClientServer(t *testing.T) { func runClientTests(t *testing.T, client abcicli.Client) { // run some tests.... 
- key := "abc" + key := testKey value := key tx := []byte(key) testClient(t, client, tx, key, value) - value = "def" + value = testValue tx = []byte(key + "=" + value) testClient(t, client, tx, key, value) } diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index eb2514a69..7f3550d4f 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -198,7 +198,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon } // update - return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power))) + return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power)) } // add, update, or remove a validator diff --git a/abci/tests/test_app/test.sh b/abci/tests/test_app/test.sh index c0bdace27..0d8301831 100755 --- a/abci/tests/test_app/test.sh +++ b/abci/tests/test_app/test.sh @@ -3,9 +3,8 @@ set -e # These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it -export GO111MODULE=on - # Get the directory of where this script is. +export PATH="$GOBIN:$PATH" SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" diff --git a/abci/tests/test_cli/test.sh b/abci/tests/test_cli/test.sh index ce074f513..cc880603d 100755 --- a/abci/tests/test_cli/test.sh +++ b/abci/tests/test_cli/test.sh @@ -2,6 +2,7 @@ set -e # Get the root directory. +export PATH="$GOBIN:$PATH" SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )" diff --git a/abci/types/protoreplace/protoreplace.go b/abci/types/protoreplace/protoreplace.go index 3ea0c73da..7058a70fb 100644 --- a/abci/types/protoreplace/protoreplace.go +++ b/abci/types/protoreplace/protoreplace.go @@ -40,7 +40,7 @@ func main() { } if writeImportTime && !wroteImport { wroteImport = true - fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n") + fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n") } if gotPackageLine { diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index c1c33593c..0a88dbd74 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -6,13 +6,13 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/tendermint/tendermint/store" "github.com/stretchr/testify/assert" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" @@ -60,7 +60,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(cmn.ErrorWrap(err, "error start app")) + panic(errors.Wrap(err, "error start app")) } blockDB := dbm.NewMemDB() @@ -69,7 +69,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + panic(errors.Wrap(err, "error constructing state from genesis file")) } // Make the BlockchainReactor itself. 
@@ -103,7 +103,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals state, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(cmn.ErrorWrap(err, "error apply block")) + panic(errors.Wrap(err, "error apply block")) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -246,6 +246,86 @@ func TestBadBlockStopsPeer(t *testing.T) { assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1) } +func TestBcBlockRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + request := bcBlockRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + nonResponseHeight int64 + expectErr bool + }{ + {"Valid Non-Response Message", 0, false}, + {"Valid Non-Response Message", 1, false}, + {"Invalid Non-Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} + assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + request := bcStatusRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + responseHeight int64 + expectErr bool + }{ + {"Valid Response Message", 0, false}, + {"Valid Response Message", 1, false}, + {"Invalid Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + response := bcStatusResponseMessage{Height: tc.responseHeight} + assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + //---------------------------------------------- // utility funcs diff --git a/blockchain/v1/peer_test.go b/blockchain/v1/peer_test.go index 3c19e4efd..c35419790 100644 --- a/blockchain/v1/peer_test.go +++ b/blockchain/v1/peer_test.go @@ -125,6 +125,7 @@ func TestPeerGetAndRemoveBlock(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { // try to get the block b, err := peer.BlockAtHeight(tt.height) @@ -167,6 +168,7 @@ func TestPeerAddBlock(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { // try to get the block err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10) diff --git a/blockchain/v1/pool.go b/blockchain/v1/pool.go index 5de741305..be2edbc21 100644 --- a/blockchain/v1/pool.go +++ b/blockchain/v1/pool.go @@ -191,7 +191,7 @@ func (pool *BlockPool) 
makeRequestBatch(maxNumRequests int) []int { // - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1 // Determine the number of requests needed by subtracting the number of requests already made from the maximum // allowed - numNeeded := int(maxNumRequests) - len(pool.blocks) + numNeeded := maxNumRequests - len(pool.blocks) for len(pool.plannedRequests) < numNeeded { if pool.nextRequestHeight > pool.MaxPeerHeight { break diff --git a/blockchain/v1/pool_test.go b/blockchain/v1/pool_test.go index 72758d3b1..5530ecd41 100644 --- a/blockchain/v1/pool_test.go +++ b/blockchain/v1/pool_test.go @@ -77,10 +77,10 @@ func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64] bPool.MaxPeerHeight = maxH for h, p := range blocks { bPool.blocks[h] = p.id - bPool.peers[p.id].RequestSent(int64(h)) + bPool.peers[p.id].RequestSent(h) if p.create { // simulate that a block at height h has been received - _ = bPool.peers[p.id].AddBlock(types.MakeBlock(int64(h), txs, nil, nil), 100) + _ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100) } } return bPool @@ -159,6 +159,7 @@ func TestBlockPoolUpdatePeer(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { pool := tt.pool err := pool.UpdatePeer(tt.args.id, tt.args.height) @@ -232,6 +233,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { tt.pool.RemovePeer(tt.args.peerID, tt.args.err) assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) @@ -272,6 +274,7 @@ func TestBlockPoolRemoveShortPeers(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { pool := tt.pool pool.removeShortPeers() @@ -317,6 +320,7 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { resetPoolTestResults() @@ -421,6 +425,7 @@ func TestBlockPoolAddBlock(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize) assert.Equal(t, tt.errWanted, err) @@ -473,6 +478,7 @@ func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { pool := tt.pool gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers() @@ -544,6 +550,7 @@ func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse) assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) @@ -584,6 +591,7 @@ func TestProcessedCurrentHeightBlock(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { tt.pool.ProcessedCurrentHeightBlock() assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) @@ -642,6 +650,7 @@ func TestRemovePeerAtCurrentHeight(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse) assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) diff --git a/blockchain/v1/reactor_fsm_test.go b/blockchain/v1/reactor_fsm_test.go index 54e177f25..7dff5bbaf 100644 --- a/blockchain/v1/reactor_fsm_test.go +++ b/blockchain/v1/reactor_fsm_test.go @@ -140,7 +140,7 @@ func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlo data: bReactorEventData{ peerID: peerID, height: height, - block: 
types.MakeBlock(int64(height), txs, nil, nil), + block: types.MakeBlock(height, txs, nil, nil), length: 100}, wantState: expected, wantNewBlocks: append(prevBlocks, height), @@ -157,7 +157,7 @@ func sBlockRespEvErrored(current, expected string, data: bReactorEventData{ peerID: peerID, height: height, - block: types.MakeBlock(int64(height), txs, nil, nil), + block: types.MakeBlock(height, txs, nil, nil), length: 100}, wantState: expected, wantErr: wantErr, @@ -211,6 +211,7 @@ type testFields struct { func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) { for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { // Create test reactor testBcR := newTestReactor(tt.startingHeight) @@ -220,6 +221,7 @@ func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) { } for _, step := range tt.steps { + step := step assert.Equal(t, step.currentState, testBcR.fsm.state.name) var heightBefore int64 @@ -769,7 +771,7 @@ forLoop: for i := 0; i < int(numBlocks); i++ { // Add the makeRequestEv step periodically. - if i%int(maxRequestsPerPeer) == 0 { + if i%maxRequestsPerPeer == 0 { testSteps = append( testSteps, sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), @@ -786,7 +788,7 @@ forLoop: numBlocksReceived++ // Add the processedBlockEv step periodically. - if numBlocksReceived >= int(maxRequestsPerPeer) || height >= numBlocks { + if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks { for j := int(height) - numBlocksReceived; j < int(height); j++ { if j >= int(numBlocks) { // This is the last block that is processed, we should be in "finished" state. @@ -829,7 +831,7 @@ func makeCorrectTransitionSequenceWithRandomParameters() testFields { maxRequestsPerPeer := cmn.RandIntn(maxRequestsPerPeerTest) + 1 // Generate the maximum number of total pending requests, >= maxRequestsPerPeer. - maxPendingRequests := cmn.RandIntn(maxTotalPendingRequestsTest-int(maxRequestsPerPeer)) + maxRequestsPerPeer + maxPendingRequests := cmn.RandIntn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer // Generate the number of blocks to be synced. 
numBlocks := int64(cmn.RandIntn(maxNumBlocksInChainTest)) + startingHeight @@ -862,6 +864,7 @@ func TestFSMCorrectTransitionSequences(t *testing.T) { } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { // Create test reactor testBcR := newTestReactor(tt.startingHeight) @@ -871,6 +874,7 @@ func TestFSMCorrectTransitionSequences(t *testing.T) { } for _, step := range tt.steps { + step := step assert.Equal(t, step.currentState, testBcR.fsm.state.name) oldNumStatusRequests := testBcR.numStatusRequests diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 1e334c700..00f7b0968 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" @@ -78,7 +78,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(cmn.ErrorWrap(err, "error start app")) + panic(errors.Wrap(err, "error start app")) } blockDB := dbm.NewMemDB() @@ -87,7 +87,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + panic(errors.Wrap(err, "error constructing state from genesis file")) } // Make the BlockchainReactor itself. @@ -117,7 +117,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals state, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(cmn.ErrorWrap(err, "error apply block")) + panic(errors.Wrap(err, "error apply block")) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -317,6 +317,86 @@ outerFor: assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1) } +func TestBcBlockRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + request := bcBlockRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + nonResponseHeight int64 + expectErr bool + }{ + {"Valid Non-Response Message", 0, false}, + {"Valid Non-Response Message", 1, false}, + {"Invalid Non-Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} + assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request 
Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + request := bcStatusRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + responseHeight int64 + expectErr bool + }{ + {"Valid Response Message", 0, false}, + {"Valid Response Message", 1, false}, + {"Invalid Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + response := bcStatusResponseMessage{Height: tc.responseHeight} + assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + //---------------------------------------------- // utility funcs diff --git a/blockchain/v2/schedule.go b/blockchain/v2/schedule.go new file mode 100644 index 000000000..329557492 --- /dev/null +++ b/blockchain/v2/schedule.go @@ -0,0 +1,387 @@ +// nolint:unused +package v2 + +import ( + "fmt" + "math" + "math/rand" + "time" + + "github.com/tendermint/tendermint/p2p" +) + +type Event interface{} + +type blockState int + +const ( + blockStateUnknown blockState = iota + blockStateNew + blockStatePending + blockStateReceived + blockStateProcessed +) + +func (e blockState) String() string { + switch e { + case blockStateUnknown: + return "Unknown" + case blockStateNew: + return "New" + case blockStatePending: + return "Pending" + case blockStateReceived: + return "Received" + case blockStateProcessed: + return "Processed" + default: + return fmt.Sprintf("unknown blockState: %d", e) + } +} + +type peerState int + +const ( + peerStateNew = iota + peerStateReady + peerStateRemoved +) + +func (e peerState) String() string { + switch e { + case peerStateNew: + return "New" + case peerStateReady: + return "Ready" + case peerStateRemoved: + return "Removed" + default: + return fmt.Sprintf("unknown peerState: %d", e) + } +} + +type scPeer struct { + peerID p2p.ID + state peerState + height int64 + lastTouched time.Time + lastRate int64 +} + +func newScPeer(peerID p2p.ID) *scPeer { + return &scPeer{ + peerID: peerID, + state: peerStateNew, + height: -1, + lastTouched: time.Time{}, + } +} + +// The schedule is a composite data structure which allows a scheduler to keep +// track of which blocks have been scheduled into which state. 
+type schedule struct { + initHeight int64 + // a list of blocks in which blockState + blockStates map[int64]blockState + + // a map of peerID to schedule specific peer struct `scPeer` used to keep + // track of peer specific state + peers map[p2p.ID]*scPeer + + // a map of heights to the peer we are waiting for a response from + pendingBlocks map[int64]p2p.ID + + // the time at which a block was put in blockStatePending + pendingTime map[int64]time.Time + + // the peerID of the peer which put the block in blockStateReceived + receivedBlocks map[int64]p2p.ID +} + +func newSchedule(initHeight int64) *schedule { + sc := schedule{ + initHeight: initHeight, + blockStates: make(map[int64]blockState), + peers: make(map[p2p.ID]*scPeer), + pendingBlocks: make(map[int64]p2p.ID), + pendingTime: make(map[int64]time.Time), + receivedBlocks: make(map[int64]p2p.ID), + } + + sc.setStateAtHeight(initHeight, blockStateNew) + + return &sc +} + +func (sc *schedule) addPeer(peerID p2p.ID) error { + if _, ok := sc.peers[peerID]; ok { + return fmt.Errorf("Cannot add duplicate peer %s", peerID) + } + sc.peers[peerID] = newScPeer(peerID) + return nil +} + +func (sc *schedule) touchPeer(peerID p2p.ID, time time.Time) error { + peer, ok := sc.peers[peerID] + if !ok { + return fmt.Errorf("Couldn't find peer %s", peerID) + } + + if peer.state == peerStateRemoved { + return fmt.Errorf("Tried to touch peer in peerStateRemoved") + } + + peer.lastTouched = time + + return nil +} + +func (sc *schedule) removePeer(peerID p2p.ID) error { + peer, ok := sc.peers[peerID] + if !ok { + return fmt.Errorf("Couldn't find peer %s", peerID) + } + + if peer.state == peerStateRemoved { + return fmt.Errorf("Tried to remove peer %s in peerStateRemoved", peerID) + } + + for height, pendingPeerID := range sc.pendingBlocks { + if pendingPeerID == peerID { + sc.setStateAtHeight(height, blockStateNew) + delete(sc.pendingTime, height) + delete(sc.pendingBlocks, height) + } + } + + for height, rcvPeerID := range sc.receivedBlocks { + if rcvPeerID == peerID { + sc.setStateAtHeight(height, blockStateNew) + delete(sc.receivedBlocks, height) + } + } + + peer.state = peerStateRemoved + + return nil +} + +func (sc *schedule) setPeerHeight(peerID p2p.ID, height int64) error { + peer, ok := sc.peers[peerID] + if !ok { + return fmt.Errorf("Can't find peer %s", peerID) + } + + if peer.state == peerStateRemoved { + return fmt.Errorf("Cannot set peer height for a peer in peerStateRemoved") + } + + if height < peer.height { + return fmt.Errorf("Cannot move peer height lower. 
from %d to %d", peer.height, height) + } + + peer.height = height + peer.state = peerStateReady + for i := sc.minHeight(); i <= height; i++ { + if sc.getStateAtHeight(i) == blockStateUnknown { + sc.setStateAtHeight(i, blockStateNew) + } + } + + return nil +} + +func (sc *schedule) getStateAtHeight(height int64) blockState { + if height < sc.initHeight { + return blockStateProcessed + } else if state, ok := sc.blockStates[height]; ok { + return state + } else { + return blockStateUnknown + } +} + +func (sc *schedule) getPeersAtHeight(height int64) []*scPeer { + peers := []*scPeer{} + for _, peer := range sc.peers { + if peer.height >= height { + peers = append(peers, peer) + } + } + + return peers +} + +func (sc *schedule) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID { + peers := []p2p.ID{} + for _, peer := range sc.peers { + if now.Sub(peer.lastTouched) > duration { + peers = append(peers, peer.peerID) + } + } + + return peers +} + +func (sc *schedule) peersSlowerThan(minSpeed int64) []p2p.ID { + peers := []p2p.ID{} + for _, peer := range sc.peers { + if peer.lastRate < minSpeed { + peers = append(peers, peer.peerID) + } + } + + return peers +} + +func (sc *schedule) setStateAtHeight(height int64, state blockState) { + sc.blockStates[height] = state +} + +func (sc *schedule) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { + peer, ok := sc.peers[peerID] + if !ok { + return fmt.Errorf("Can't find peer %s", peerID) + } + + if peer.state == peerStateRemoved { + return fmt.Errorf("Cannot receive blocks from removed peer %s", peerID) + } + + if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { + return fmt.Errorf("Received block %d from peer %s without being requested", height, peerID) + } + + pendingTime, ok := sc.pendingTime[height] + if !ok || now.Sub(pendingTime) <= 0 { + return fmt.Errorf("Clock error. 
Block %d received at %s but requested at %s", + height, now, pendingTime) + } + + peer.lastRate = size / int64(now.Sub(pendingTime).Seconds()) + + sc.setStateAtHeight(height, blockStateReceived) + delete(sc.pendingBlocks, height) + delete(sc.pendingTime, height) + + sc.receivedBlocks[height] = peerID + + return nil +} + +func (sc *schedule) markPending(peerID p2p.ID, height int64, time time.Time) error { + peer, ok := sc.peers[peerID] + if !ok { + return fmt.Errorf("Can't find peer %s", peerID) + } + + state := sc.getStateAtHeight(height) + if state != blockStateNew { + return fmt.Errorf("Block %d should be in blockStateNew but was %s", height, state) + } + + if peer.state != peerStateReady { + return fmt.Errorf("Cannot schedule %d from %s in %s", height, peerID, peer.state) + } + + if height > peer.height { + return fmt.Errorf("Cannot request height %d from peer %s who is at height %d", + height, peerID, peer.height) + } + + sc.setStateAtHeight(height, blockStatePending) + sc.pendingBlocks[height] = peerID + // XXX: to make this more accurate we can introduce a message from + // the IO routine which indicates the time the request was put on the wire + sc.pendingTime[height] = time + + return nil +} + +func (sc *schedule) markProcessed(height int64) error { + state := sc.getStateAtHeight(height) + if state != blockStateReceived { + return fmt.Errorf("Can't mark height %d processed from block state %s", height, state) + } + + delete(sc.receivedBlocks, height) + + sc.setStateAtHeight(height, blockStateProcessed) + + return nil +} + +// allBlocksProcessed returns true if all blocks are in blockStateProcessed and +// determines if the schedule has been completed +func (sc *schedule) allBlocksProcessed() bool { + for _, state := range sc.blockStates { + if state != blockStateProcessed { + return false + } + } + return true +} + +// highest block | state == blockStateNew +func (sc *schedule) maxHeight() int64 { + var max int64 = 0 + for height, state := range sc.blockStates { + if state == blockStateNew && height > max { + max = height + } + } + + return max +} + +// lowest block | state == blockStateNew +func (sc *schedule) minHeight() int64 { + var min int64 = math.MaxInt64 + for height, state := range sc.blockStates { + if state == blockStateNew && height < min { + min = height + } + } + + return min +} + +func (sc *schedule) pendingFrom(peerID p2p.ID) []int64 { + heights := []int64{} + for height, pendingPeerID := range sc.pendingBlocks { + if pendingPeerID == peerID { + heights = append(heights, height) + } + } + return heights +} + +func (sc *schedule) selectPeer(peers []*scPeer) *scPeer { + // FIXME: properPeerSelector + s := rand.NewSource(time.Now().Unix()) + r := rand.New(s) + + return peers[r.Intn(len(peers))] +} + +// XXX: this duplicates the logic of peersInactiveSince and peersSlowerThan +func (sc *schedule) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID { + prunable := []p2p.ID{} + for peerID, peer := range sc.peers { + if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate { + prunable = append(prunable, peerID) + } + } + + return prunable +} + +func (sc *schedule) numBlockInState(targetState blockState) uint32 { + var num uint32 = 0 + for _, state := range sc.blockStates { + if state == targetState { + num++ + } + } + return num +} diff --git a/blockchain/v2/schedule_test.go b/blockchain/v2/schedule_test.go new file mode 100644 index 000000000..a1448c528 --- /dev/null +++ b/blockchain/v2/schedule_test.go @@ -0,0 +1,272 @@
+package v2 + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" +) + +func TestScheduleInit(t *testing.T) { + var ( + initHeight int64 = 5 + sc = newSchedule(initHeight) + ) + + assert.Equal(t, blockStateNew, sc.getStateAtHeight(initHeight)) + assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) + assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) +} + +func TestAddPeer(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerIDTwo p2p.ID = "2" + sc = newSchedule(initHeight) + ) + + assert.Nil(t, sc.addPeer(peerID)) + assert.Nil(t, sc.addPeer(peerIDTwo)) + assert.Error(t, sc.addPeer(peerID)) +} + +func TestTouchPeer(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + sc = newSchedule(initHeight) + now = time.Now() + ) + + assert.Error(t, sc.touchPeer(peerID, now), + "Touching an unknown peer should return errPeerNotFound") + + assert.Nil(t, sc.addPeer(peerID), + "Adding a peer should return no error") + assert.Nil(t, sc.touchPeer(peerID, now), + "Touching a peer should return no error") + + threshold := 10 * time.Second + assert.Empty(t, sc.peersInactiveSince(threshold, now.Add(9*time.Second)), + "Expected no peers to have been touched over 9 seconds") + assert.Containsf(t, sc.peersInactiveSince(threshold, now.Add(11*time.Second)), peerID, + "Expected one %s to have been touched over 10 seconds ago", peerID) +} + +func TestPeerHeight(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerHeight int64 = 20 + sc = newSchedule(initHeight) + ) + + assert.NoError(t, sc.addPeer(peerID), + "Adding a peer should return no error") + assert.NoError(t, sc.setPeerHeight(peerID, peerHeight)) + for i := initHeight; i <= peerHeight; i++ { + assert.Equal(t, sc.getStateAtHeight(i), blockStateNew, + "Expected all blocks to be in blockStateNew") + peerIDs := []p2p.ID{} + for _, peer := range sc.getPeersAtHeight(i) { + peerIDs = append(peerIDs, peer.peerID) + } + + assert.Containsf(t, peerIDs, peerID, + "Expected %s to have block %d", peerID, i) + } +} + +func TestTransitionPending(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerIDTwo p2p.ID = "2" + peerHeight int64 = 20 + sc = newSchedule(initHeight) + now = time.Now() + ) + + assert.NoError(t, sc.addPeer(peerID), + "Adding a peer should return no error") + assert.Nil(t, sc.addPeer(peerIDTwo), + "Adding a peer should return no error") + + assert.Error(t, sc.markPending(peerID, peerHeight, now), + "Expected scheduling a block from a peer in peerStateNew to fail") + + assert.NoError(t, sc.setPeerHeight(peerID, peerHeight), + "Expected setPeerHeight to return no error") + assert.NoError(t, sc.setPeerHeight(peerIDTwo, peerHeight), + "Expected setPeerHeight to return no error") + + assert.NoError(t, sc.markPending(peerID, peerHeight, now), + "Expected markingPending new block to succeed") + assert.Error(t, sc.markPending(peerIDTwo, peerHeight, now), + "Expected markingPending by a second peer to fail") + + assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight), + "Expected the block to to be in blockStatePending") + + assert.NoError(t, sc.removePeer(peerID), + "Expected removePeer to return no error") + + assert.Equal(t, blockStateNew, sc.getStateAtHeight(peerHeight), + "Expected the block to to be in blockStateNew") + + assert.Error(t, sc.markPending(peerID, peerHeight, now), + "Expected markingPending removed peer to fail") + + assert.NoError(t, 
sc.markPending(peerIDTwo, peerHeight, now), + "Expected markingPending on a ready peer to succeed") + + assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight), + "Expected the block to to be in blockStatePending") +} + +func TestTransitionReceived(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerIDTwo p2p.ID = "2" + peerHeight int64 = 20 + blockSize int64 = 1024 + sc = newSchedule(initHeight) + now = time.Now() + receivedAt = now.Add(1 * time.Second) + ) + + assert.NoError(t, sc.addPeer(peerID), + "Expected adding peer %s to succeed", peerID) + assert.NoError(t, sc.addPeer(peerIDTwo), + "Expected adding peer %s to succeed", peerIDTwo) + assert.NoError(t, sc.setPeerHeight(peerID, peerHeight), + "Expected setPeerHeight to return no error") + assert.NoErrorf(t, sc.setPeerHeight(peerIDTwo, peerHeight), + "Expected setPeerHeight on %s to %d to succeed", peerIDTwo, peerHeight) + assert.NoError(t, sc.markPending(peerID, initHeight, now), + "Expected markingPending new block to succeed") + + assert.Error(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt), + "Expected marking markReceived from a non requesting peer to fail") + + assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt), + "Expected marking markReceived on a pending block to succeed") + + assert.Error(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt), + "Expected marking markReceived on received block to fail") + + assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight), + "Expected block %d to be blockHeightReceived", initHeight) + + assert.NoErrorf(t, sc.removePeer(peerID), + "Expected removePeer removing %s to succeed", peerID) + + assert.Equalf(t, blockStateNew, sc.getStateAtHeight(initHeight), + "Expected block %d to be blockStateNew", initHeight) + + assert.NoErrorf(t, sc.markPending(peerIDTwo, initHeight, now), + "Expected markingPending %d from %s to succeed", initHeight, peerIDTwo) + assert.NoErrorf(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt), + "Expected marking markReceived %d from %s to succeed", initHeight, peerIDTwo) + assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight), + "Expected block %d to be blockStateReceived", initHeight) +} + +func TestTransitionProcessed(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerHeight int64 = 20 + blockSize int64 = 1024 + sc = newSchedule(initHeight) + now = time.Now() + receivedAt = now.Add(1 * time.Second) + ) + + assert.NoError(t, sc.addPeer(peerID), + "Expected adding peer %s to succeed", peerID) + assert.NoErrorf(t, sc.setPeerHeight(peerID, peerHeight), + "Expected setPeerHeight on %s to %d to succeed", peerID, peerHeight) + assert.NoError(t, sc.markPending(peerID, initHeight, now), + "Expected markingPending new block to succeed") + assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt), + "Expected marking markReceived on a pending block to succeed") + + assert.Error(t, sc.markProcessed(initHeight+1), + "Expected marking %d as processed to fail", initHeight+1) + assert.NoError(t, sc.markProcessed(initHeight), + "Expected marking %d as processed to succeed", initHeight) + + assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight), + "Expected block %d to be blockStateProcessed", initHeight) + + assert.NoError(t, sc.removePeer(peerID), + "Expected removing peer %s to succeed", peerID) + + assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight), + "Expected block %d to be 
blockStateProcessed", initHeight) +} + +func TestMinMaxHeight(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerHeight int64 = 20 + sc = newSchedule(initHeight) + now = time.Now() + ) + + assert.Equal(t, initHeight, sc.minHeight(), + "Expected min height to be the initialized height") + + assert.Equal(t, initHeight, sc.maxHeight(), + "Expected max height to be the initialized height") + + assert.NoError(t, sc.addPeer(peerID), + "Adding a peer should return no error") + + assert.NoError(t, sc.setPeerHeight(peerID, peerHeight), + "Expected setPeerHeight to return no error") + + assert.Equal(t, peerHeight, sc.maxHeight(), + "Expected max height to increase to peerHeight") + + assert.Nil(t, sc.markPending(peerID, initHeight, now.Add(1*time.Second)), + "Expected marking initHeight as pending to return no error") + + assert.Equal(t, initHeight+1, sc.minHeight(), + "Expected marking initHeight as pending to move minHeight forward") +} + +func TestPeersSlowerThan(t *testing.T) { + var ( + initHeight int64 = 5 + peerID p2p.ID = "1" + peerHeight int64 = 20 + blockSize int64 = 1024 + sc = newSchedule(initHeight) + now = time.Now() + receivedAt = now.Add(1 * time.Second) + ) + + assert.NoError(t, sc.addPeer(peerID), + "Adding a peer should return no error") + + assert.NoError(t, sc.setPeerHeight(peerID, peerHeight), + "Expected setPeerHeight to return no error") + + assert.NoError(t, sc.markPending(peerID, peerHeight, now), + "Expected markingPending on to return no error") + + assert.NoError(t, sc.markReceived(peerID, peerHeight, blockSize, receivedAt), + "Expected markingPending on to return no error") + + assert.Empty(t, sc.peersSlowerThan(blockSize-1), + "expected no peers to be slower than blockSize-1 bytes/sec") + + assert.Containsf(t, sc.peersSlowerThan(blockSize+1), peerID, + "expected %s to be slower than blockSize+1 bytes/sec", peerID) +} diff --git a/cmd/contract_tests/main.go b/cmd/contract_tests/main.go new file mode 100644 index 000000000..487537824 --- /dev/null +++ b/cmd/contract_tests/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/snikch/goodman/hooks" + "github.com/snikch/goodman/transaction" +) + +func main() { + // This must be compiled beforehand and given to dredd as parameter, in the meantime the server should be running + h := hooks.NewHooks() + server := hooks.NewServer(hooks.NewHooksRunner(h)) + h.BeforeAll(func(t []*transaction.Transaction) { + fmt.Println(t[0].Name) + }) + h.BeforeEach(func(t *transaction.Transaction) { + if strings.HasPrefix(t.Name, "Tx") || + // We need a proper example of evidence to broadcast + strings.HasPrefix(t.Name, "Info > /broadcast_evidence") || + // We need a proper example of path and data + strings.HasPrefix(t.Name, "ABCI > /abci_query") || + // We need to find a way to make a transaction before starting the tests, + // that hash should replace the dummy one in hte swagger file + strings.HasPrefix(t.Name, "Info > /tx") { + t.Skip = true + fmt.Printf("%s Has been skipped\n", t.Name) + } + }) + server.Serve() + defer server.Listener.Close() + fmt.Print("FINE") +} diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index c86bced81..22af6418f 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -48,15 +48,17 @@ func main() { os.Exit(1) } - rs := privval.NewSignerServiceEndpoint(logger, *chainID, pv, dialer) - err := rs.Start() + sd := privval.NewSignerDialerEndpoint(logger, dialer) + ss := privval.NewSignerServer(sd, *chainID, pv) + + err 
:= ss.Start() if err != nil { panic(err) } // Stop upon receiving SIGTERM or CTRL-C. cmn.TrapSignal(logger, func() { - err := rs.Stop() + err := ss.Stop() if err != nil { panic(err) } diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index d3a4ac53e..906af930b 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" + "github.com/pkg/errors" "github.com/spf13/cobra" cmn "github.com/tendermint/tendermint/libs/common" @@ -80,7 +81,7 @@ func runProxy(cmd *cobra.Command, args []string) error { logger.Info("Constructing Verifier...") cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize) if err != nil { - return cmn.ErrorWrap(err, "constructing Verifier") + return errors.Wrap(err, "constructing Verifier") } cert.SetLogger(logger) sc := proxy.SecureClient(node, cert) @@ -88,7 +89,7 @@ func runProxy(cmd *cobra.Command, args []string) error { logger.Info("Starting proxy...") err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections) if err != nil { - return cmn.ErrorWrap(err, "starting proxy") + return errors.Wrap(err, "starting proxy") } // Run forever diff --git a/config/config.go b/config/config.go index b00702ce6..3e4bf9340 100644 --- a/config/config.go +++ b/config/config.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "net/http" "os" "path/filepath" "time" @@ -385,7 +386,7 @@ func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ ListenAddress: "tcp://127.0.0.1:26657", CORSAllowedOrigins: []string{}, - CORSAllowedMethods: []string{"HEAD", "GET", "POST"}, + CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, GRPCListenAddress: "", GRPCMaxOpenConnections: 900, @@ -637,7 +638,7 @@ type MempoolConfig struct { Size int `mapstructure:"size"` MaxTxsBytes int64 `mapstructure:"max_txs_bytes"` CacheSize int `mapstructure:"cache_size"` - MaxMsgBytes int `mapstructure:"max_msg_bytes"` + MaxTxBytes int `mapstructure:"max_tx_bytes"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool @@ -651,7 +652,7 @@ func DefaultMempoolConfig() *MempoolConfig { Size: 5000, MaxTxsBytes: 1024 * 1024 * 1024, // 1GB CacheSize: 10000, - MaxMsgBytes: 1024 * 1024, // 1MB + MaxTxBytes: 1024 * 1024, // 1MB } } @@ -684,8 +685,8 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.CacheSize < 0 { return errors.New("cache_size can't be negative") } - if cfg.MaxMsgBytes < 0 { - return errors.New("max_msg_bytes can't be negative") + if cfg.MaxTxBytes < 0 { + return errors.New("max_tx_bytes can't be negative") } return nil } diff --git a/config/config_test.go b/config/config_test.go index 6f9e3783e..6da032d07 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,6 +1,7 @@ package config import ( + "reflect" "testing" "time" @@ -52,3 +53,116 @@ func TestTLSConfiguration(t *testing.T) { cfg.RPC.TLSKeyFile = "/abs/path/to/file.key" assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile()) } + +func TestBaseConfigValidateBasic(t *testing.T) { + cfg := TestBaseConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with log format + cfg.LogFormat = "invalid" + assert.Error(t, cfg.ValidateBasic()) +} + +func TestRPCConfigValidateBasic(t *testing.T) { + cfg := TestRPCConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "GRPCMaxOpenConnections", + "MaxOpenConnections", + "MaxSubscriptionClients", + 
"MaxSubscriptionsPerClient", + "TimeoutBroadcastTxCommit", + "MaxBodyBytes", + "MaxHeaderBytes", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "MaxNumInboundPeers", + "MaxNumOutboundPeers", + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} + +func TestMempoolConfigValidateBasic(t *testing.T) { + cfg := TestMempoolConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "Size", + "MaxTxsBytes", + "CacheSize", + "MaxTxBytes", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} + +func TestFastSyncConfigValidateBasic(t *testing.T) { + cfg := TestFastSyncConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with version + cfg.Version = "v1" + assert.NoError(t, cfg.ValidateBasic()) + + cfg.Version = "invalid" + assert.Error(t, cfg.ValidateBasic()) +} + +func TestConsensusConfigValidateBasic(t *testing.T) { + cfg := TestConsensusConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "TimeoutPropose", + "TimeoutProposeDelta", + "TimeoutPrevote", + "TimeoutPrevoteDelta", + "TimeoutPrecommit", + "TimeoutPrecommitDelta", + "TimeoutCommit", + "CreateEmptyBlocksInterval", + "PeerGossipSleepDuration", + "PeerQueryMaj23SleepDuration", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} + +func TestInstrumentationConfigValidateBasic(t *testing.T) { + cfg := TestInstrumentationConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with maximum open connections + cfg.MaxOpenConnections = -1 + assert.Error(t, cfg.ValidateBasic()) +} diff --git a/config/toml.go b/config/toml.go index b79d14d91..268c96ff9 100644 --- a/config/toml.go +++ b/config/toml.go @@ -294,8 +294,9 @@ max_txs_bytes = {{ .Mempool.MaxTxsBytes }} # Size of the cache (used to filter transactions we saw earlier) in transactions cache_size = {{ .Mempool.CacheSize }} -# Limit the size of TxMessage -max_msg_bytes = {{ .Mempool.MaxMsgBytes }} +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. 
+max_tx_bytes = {{ .Mempool.MaxTxBytes }} ##### fast sync configuration options ##### [fastsync] diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index aa205487f..168f07924 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -18,6 +18,8 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" + cstypes "github.com/tendermint/tendermint/consensus/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" @@ -632,3 +634,260 @@ func capture() { count := runtime.Stack(trace, true) fmt.Printf("Stack of %d bytes: %s\n", count, trace) } + +//------------------------------------------------------------- +// Ensure basic validation of structs is functioning + +func TestNewRoundStepMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + messageHeight int64 + messageRound int + messageStep cstypes.RoundStepType + messageLastCommitRound int + expectErr bool + }{ + {"Valid Message", 0, 0, 0x01, 1, false}, + {"Invalid Message", -1, 0, 0x01, 1, true}, + {"Invalid Message", 0, -1, 0x01, 1, true}, + {"Invalid Message", 0, 0, 0x00, 1, true}, + {"Invalid Message", 0, 0, 0x00, 0, true}, + {"Invalid Message", 1, 0, 0x01, 0, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := NewRoundStepMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + Step: tc.messageStep, + LastCommitRound: tc.messageLastCommitRound, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestNewValidBlockMessageValidateBasic(t *testing.T) { + testBitArray := cmn.NewBitArray(1) + testCases := []struct { + testName string + messageHeight int64 + messageRound int + messageBlockParts *cmn.BitArray + expectErr bool + }{ + {"Valid Message", 0, 0, testBitArray, false}, + {"Invalid Message", -1, 0, testBitArray, true}, + {"Invalid Message", 0, -1, testBitArray, true}, + {"Invalid Message", 0, 0, cmn.NewBitArray(0), true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := NewValidBlockMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + BlockParts: tc.messageBlockParts, + } + + message.BlockPartsHeader.Total = 1 + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestProposalPOLMessageValidateBasic(t *testing.T) { + testBitArray := cmn.NewBitArray(1) + testCases := []struct { + testName string + messageHeight int64 + messageProposalPOLRound int + messageProposalPOL *cmn.BitArray + expectErr bool + }{ + {"Valid Message", 0, 0, testBitArray, false}, + {"Invalid Message", -1, 0, testBitArray, true}, + {"Invalid Message", 0, -1, testBitArray, true}, + {"Invalid Message", 0, 0, cmn.NewBitArray(0), true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := ProposalPOLMessage{ + Height: tc.messageHeight, + ProposalPOLRound: tc.messageProposalPOLRound, + ProposalPOL: tc.messageProposalPOL, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBlockPartMessageValidateBasic(t *testing.T) { + testPart := new(types.Part) + testCases := 
[]struct { + testName string + messageHeight int64 + messageRound int + messagePart *types.Part + expectErr bool + }{ + {"Valid Message", 0, 0, testPart, false}, + {"Invalid Message", -1, 0, testPart, true}, + {"Invalid Message", 0, -1, testPart, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := BlockPartMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + Part: tc.messagePart, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } + + message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)} + message.Part.Index = -1 + + assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") +} + +func TestHasVoteMessageValidateBasic(t *testing.T) { + const ( + validSignedMsgType types.SignedMsgType = 0x01 + invalidSignedMsgType types.SignedMsgType = 0x03 + ) + + testCases := []struct { + testName string + messageHeight int64 + messageRound int + messageType types.SignedMsgType + messageIndex int + expectErr bool + }{ + {"Valid Message", 0, 0, validSignedMsgType, 0, false}, + {"Invalid Message", -1, 0, validSignedMsgType, 0, true}, + {"Invalid Message", 0, -1, validSignedMsgType, 0, true}, + {"Invalid Message", 0, 0, invalidSignedMsgType, 0, true}, + {"Invalid Message", 0, 0, validSignedMsgType, -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := HasVoteMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + Type: tc.messageType, + Index: tc.messageIndex, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { + const ( + validSignedMsgType types.SignedMsgType = 0x01 + invalidSignedMsgType types.SignedMsgType = 0x03 + ) + + validBlockID := types.BlockID{} + invalidBlockID := types.BlockID{ + Hash: cmn.HexBytes{}, + PartsHeader: types.PartSetHeader{ + Total: -1, + Hash: cmn.HexBytes{}, + }, + } + + testCases := []struct { + testName string + messageHeight int64 + messageRound int + messageType types.SignedMsgType + messageBlockID types.BlockID + expectErr bool + }{ + {"Valid Message", 0, 0, validSignedMsgType, validBlockID, false}, + {"Invalid Message", -1, 0, validSignedMsgType, validBlockID, true}, + {"Invalid Message", 0, -1, validSignedMsgType, validBlockID, true}, + {"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, true}, + {"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := VoteSetMaj23Message{ + Height: tc.messageHeight, + Round: tc.messageRound, + Type: tc.messageType, + BlockID: tc.messageBlockID, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestVoteSetBitsMessageValidateBasic(t *testing.T) { + const ( + validSignedMsgType types.SignedMsgType = 0x01 + invalidSignedMsgType types.SignedMsgType = 0x03 + ) + + validBlockID := types.BlockID{} + invalidBlockID := types.BlockID{ + Hash: cmn.HexBytes{}, + PartsHeader: types.PartSetHeader{ + Total: -1, + Hash: cmn.HexBytes{}, + }, + } + testBitArray := cmn.NewBitArray(1) + + testCases := []struct { + testName string + messageHeight int64 + messageRound int + messageType types.SignedMsgType + messageBlockID types.BlockID + messageVotes 
*cmn.BitArray + expectErr bool + }{ + {"Valid Message", 0, 0, validSignedMsgType, validBlockID, testBitArray, false}, + {"Invalid Message", -1, 0, validSignedMsgType, validBlockID, testBitArray, true}, + {"Invalid Message", 0, -1, validSignedMsgType, validBlockID, testBitArray, true}, + {"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, testBitArray, true}, + {"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, testBitArray, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := VoteSetBitsMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + Type: tc.messageType, + // Votes: tc.messageVotes, + BlockID: tc.messageBlockID, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/consensus/replay.go b/consensus/replay.go index 43ad557f7..83c6b3d40 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -246,7 +246,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { return fmt.Errorf("Error calling Info: %v", err) } - blockHeight := int64(res.LastBlockHeight) + blockHeight := res.LastBlockHeight if blockHeight < 0 { return fmt.Errorf("Got a negative last block height (%d) from the app", blockHeight) } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 5b454248d..b308e4946 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -123,6 +123,7 @@ func TestWALCrash(t *testing.T) { } for i, tc := range testCases { + tc := tc consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i)) t.Run(tc.name, func(t *testing.T) { crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) diff --git a/consensus/state.go b/consensus/state.go index 5dfeaf907..50b5981e6 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -537,7 +537,7 @@ func (cs *ConsensusState) updateToState(state sm.State) { // We add timeoutCommit to allow transactions // to be gathered for the first block. // And alternative solution that relies on clocks: - // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) + // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) cs.StartTime = cs.config.Commit(tmtime.Now()) } else { cs.StartTime = cs.config.Commit(cs.CommitTime) @@ -756,9 +756,25 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { func (cs *ConsensusState) handleTxsAvailable() { cs.mtx.Lock() defer cs.mtx.Unlock() - // we only need to do this for round 0 - cs.enterNewRound(cs.Height, 0) - cs.enterPropose(cs.Height, 0) + + // We only need to do this for round 0. 
+ if cs.Round != 0 { + return + } + + switch cs.Step { + case cstypes.RoundStepNewHeight: // timeoutCommit phase + if cs.needProofBlock(cs.Height) { + // enterPropose will be called by enterNewRound + return + } + + // +1ms to ensure RoundStepNewRound timeout always happens after RoundStepNewHeight + timeoutCommit := cs.StartTime.Sub(tmtime.Now()) + 1*time.Millisecond + cs.scheduleTimeout(timeoutCommit, cs.Height, 0, cstypes.RoundStepNewRound) + case cstypes.RoundStepNewRound: // after timeoutCommit + cs.enterPropose(cs.Height, 0) + } } //----------------------------------------------------------------------------- @@ -766,7 +782,7 @@ func (cs *ConsensusState) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeout==true, after receiving all precommits from (height,round-1) +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) @@ -1458,7 +1474,7 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p _, err = cdc.UnmarshalBinaryLengthPrefixedReader( cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, - int64(cs.state.ConsensusParams.Block.MaxBytes), + cs.state.ConsensusParams.Block.MaxBytes, ) if err != nil { return added, err @@ -1672,10 +1688,10 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } default: - panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. + panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-amino should prevent this. 
} - return + return added, err } func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { diff --git a/consensus/state_test.go b/consensus/state_test.go index 8409f2235..96547e796 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -189,7 +189,7 @@ func TestStateBadProposal(t *testing.T) { if len(stateHash) == 0 { stateHash = make([]byte, 32) } - stateHash[0] = byte((stateHash[0] + 1) % 255) + stateHash[0] = (stateHash[0] + 1) % 255 propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} @@ -364,7 +364,7 @@ func TestStateLockNoPOL(t *testing.T) { // lets add one for a different block hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) - hash[0] = byte((hash[0] + 1) % 255) + hash[0] = (hash[0] + 1) % 255 signAddVotes(cs1, types.PrecommitType, hash, thePartSetHeader, vs2) ensurePrecommit(voteCh, height, round) // precommit diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 5cb73fb7f..82d912f3a 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -86,6 +86,8 @@ func TestWALEncoderDecoder(t *testing.T) { b := new(bytes.Buffer) for _, msg := range msgs { + msg := msg + b.Reset() enc := NewWALEncoder(b) diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 5e2a3ab12..ad101d94d 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -3,7 +3,7 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) //---------------------------------------- @@ -44,11 +44,11 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er key := op.GetKey() if len(key) != 0 { if len(keys) == 0 { - return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key)) + return errors.Errorf("Key path has insufficient # of parts: expected no more keys but got %+v", string(key)) } lastKey := keys[len(keys)-1] if !bytes.Equal(lastKey, key) { - return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) + return errors.Errorf("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) } keys = keys[:len(keys)-1] } @@ -58,10 +58,10 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er } } if !bytes.Equal(root, args[0]) { - return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) + return errors.Errorf("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) } if len(keys) != 0 { - return cmn.NewError("Keypath not consumed all") + return errors.New("Keypath not consumed all") } return nil } @@ -92,7 +92,7 @@ func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { decoder := prt.decoders[pop.Type] if decoder == nil { - return nil, cmn.NewError("unrecognized proof type %v", pop.Type) + return nil, errors.Errorf("unrecognized proof type %v", pop.Type) } return decoder(pop) } @@ -102,7 +102,7 @@ func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { for _, pop := range proof.Ops { operator, err := prt.Decode(pop) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding a proof operator") + return nil, errors.Wrap(err, "decoding a proof operator") } poz = append(poz, operator) } 
@@ -122,7 +122,7 @@ func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { poz, err := prt.DecodeProof(proof) if err != nil { - return cmn.ErrorWrap(err, "decoding proof") + return errors.Wrap(err, "decoding proof") } return poz.Verify(root, keypath, args) } diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index aec93e826..7ea67853b 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) /* @@ -87,7 +87,7 @@ func (pth KeyPath) String() string { // Each key must use a known encoding. func KeyPathToKeys(path string) (keys [][]byte, err error) { if path == "" || path[0] != '/' { - return nil, cmn.NewError("key path string must start with a forward slash '/'") + return nil, errors.New("key path string must start with a forward slash '/'") } parts := strings.Split(path[1:], "/") keys = make([][]byte, len(parts)) @@ -96,13 +96,13 @@ func KeyPathToKeys(path string) (keys [][]byte, err error) { hexPart := part[2:] key, err := hex.DecodeString(hexPart) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) + return nil, errors.Wrapf(err, "decoding hex-encoded part #%d: /%s", i, part) } keys[i] = key } else { key, err := url.PathUnescape(part) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) + return nil, errors.Wrapf(err, "decoding url-encoded part #%d: /%s", i, part) } keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... } diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go index 247921ad5..55337b7b8 100644 --- a/crypto/merkle/proof_simple_value.go +++ b/crypto/merkle/proof_simple_value.go @@ -4,8 +4,9 @@ import ( "bytes" "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" - cmn "github.com/tendermint/tendermint/libs/common" ) const ProofOpSimpleValue = "simple:v" @@ -39,12 +40,12 @@ func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { if pop.Type != ProofOpSimpleValue { - return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) + return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) } var op SimpleValueOp // a bit strange as we'll discard this, but it works. err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") } return NewSimpleValueOp(pop.Key, op.Proof), nil } @@ -64,7 +65,7 @@ func (op SimpleValueOp) String() string { func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { if len(args) != 1 { - return nil, cmn.NewError("expected 1 arg, got %v", len(args)) + return nil, errors.Errorf("expected 1 arg, got %v", len(args)) } value := args[0] hasher := tmhash.New() @@ -73,12 +74,12 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { bz := new(bytes.Buffer) // Wrap to hash the KVPair. 
- encodeByteSlice(bz, []byte(op.key)) // does not error - encodeByteSlice(bz, []byte(vhash)) // does not error + encodeByteSlice(bz, op.key) // does not error + encodeByteSlice(bz, vhash) // does not error kvhash := leafHash(bz.Bytes()) if !bytes.Equal(kvhash, op.Proof.LeafHash) { - return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + return nil, errors.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) } return [][]byte{ diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index 4de3246f1..4dc916ac9 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -3,9 +3,9 @@ package merkle import ( "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tendermint/libs/common" ) const ProofOpDomino = "test:domino" @@ -34,7 +34,7 @@ func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { var op DominoOp // a bit strange as we'll discard this, but it works. err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") } return NewDominoOp(string(pop.Key), op.Input, op.Output), nil } @@ -50,10 +50,10 @@ func (dop DominoOp) ProofOp() ProofOp { func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { if len(input) != 1 { - return nil, cmn.NewError("Expected input of length 1") + return nil, errors.New("Expected input of length 1") } if string(input[0]) != dop.Input { - return nil, cmn.NewError("Expected input %v, got %v", + return nil, errors.Errorf("Expected input %v, got %v", dop.Input, string(input[0])) } return [][]byte{[]byte(dop.Output)}, nil diff --git a/crypto/merkle/rfc6962_test.go b/crypto/merkle/rfc6962_test.go index 52eab4228..e2fe7f617 100644 --- a/crypto/merkle/rfc6962_test.go +++ b/crypto/merkle/rfc6962_test.go @@ -56,6 +56,7 @@ func TestRFC6962Hasher(t *testing.T) { got: innerHash([]byte("N123"), []byte("N456")), }, } { + tc := tc t.Run(tc.desc, func(t *testing.T) { wantBytes, err := hex.DecodeString(tc.want) if err != nil { diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index d3be5d7ec..da32157db 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -2,10 +2,9 @@ package merkle import ( "bytes" - "errors" "fmt" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) // SimpleProof represents a simple Merkle proof. 
@@ -75,11 +74,11 @@ func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error { return errors.New("Proof index cannot be negative") } if !bytes.Equal(sp.LeafHash, leafHash) { - return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) + return errors.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) } computedHash := sp.ComputeRootHash() if !bytes.Equal(computedHash, rootHash) { - return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash) + return errors.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) } return nil } diff --git a/crypto/multisig/bitarray/compact_bit_array_test.go b/crypto/multisig/bitarray/compact_bit_array_test.go index 4612ae25a..369945ff3 100644 --- a/crypto/multisig/bitarray/compact_bit_array_test.go +++ b/crypto/multisig/bitarray/compact_bit_array_test.go @@ -21,7 +21,7 @@ func randCompactBitArray(bits int) (*CompactBitArray, []byte) { } } // Set remaining bits - for i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ { + for i := uint8(0); i < 8-bA.ExtraBitsStored; i++ { bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0) } return bA, src @@ -72,6 +72,7 @@ func TestJSONMarshalUnmarshal(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.bA.String(), func(t *testing.T) { bz, err := json.Marshal(tc.bA) require.NoError(t, err) @@ -131,6 +132,7 @@ func TestCompactMarshalUnmarshal(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.bA.String(), func(t *testing.T) { bz := tc.bA.CompactMarshal() @@ -165,12 +167,15 @@ func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) { {`"______________xx"`, []int{14, 15}, []int{0, 1}}, } for tcIndex, tc := range testCases { + tc := tc + tcIndex := tcIndex t.Run(tc.marshalledBA, func(t *testing.T) { var bA *CompactBitArray err := json.Unmarshal([]byte(tc.marshalledBA), &bA) require.NoError(t, err) for i := 0; i < len(tc.bAIndex); i++ { + require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i) } }) diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go index 305f12020..3103413f8 100644 --- a/crypto/secp256k1/secp256k1_internal_test.go +++ b/crypto/secp256k1/secp256k1_internal_test.go @@ -29,6 +29,7 @@ func Test_genPrivKey(t *testing.T) { {"valid because 0 < 1 < N", validOne, false}, } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { if tt.shouldPanic { require.Panics(t, func() { diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 2488b5399..aaf8f8112 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -100,6 +100,7 @@ func TestGenPrivKeySecp256k1(t *testing.T) { {"another seed used in cosmos tests #3", []byte("")}, } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret) require.NotNil(t, gotPrivKey) diff --git a/docs/DEV_SESSIONS.md b/docs/DEV_SESSIONS.md new file mode 100644 index 000000000..a4757d5dd --- /dev/null +++ b/docs/DEV_SESSIONS.md @@ -0,0 +1,33 @@ +# Developer Sessions + +The Tendermint Core developer call is comprised of both [Interchain +Foundation](http://interchain.io/) and [All in Bits](https://tendermint.com/) +team members discussing the development of [Tendermint +BFT](https://github.com/tendermint/tendermint) and related research. 
The goal +of the Tendermint Core developer calls is to provide transparency into the +decision making process, technical information, update cycles etc. + +## List + +| Date | Topic | Link(s) | +| --------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| August 2019 | Part Three: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=whyL6UrKe7I&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | +| August 2019 | Fork Accountability | [YouTube](https://www.youtube.com/watch?v=Jph-4PGtdPo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | +| July 2019 | Part Two: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=gTjG7jNNdKQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | +| July 2019 | Part One: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=C6fH_sgPJzA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | +| June 2019 | Testnet Deployments | [YouTube](https://www.youtube.com/watch?v=gYA6no7tRlM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | +| June 2019 | Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=JLBGH8yxABk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | +| June 2019 | Tendermint Rust Libraries | [YouTube](https://www.youtube.com/watch?v=-WXKdyoGHwA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | +| May 2019 | Merkle Tree Deep Dive | [YouTube](https://www.youtube.com/watch?v=L3bt2Uw8ICg&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | +| May 2019 | Remote Signer Refactor | [YouTube](https://www.youtube.com/watch?v=eUyXXEEuBzQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=12) | +| May 2019 | Introduction to Ansible | [YouTube](https://www.youtube.com/watch?v=72clQLjzPg4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=14&t=0s) | | | +| April 2019 | Tendermint State Sync Design Discussion | [YouTube](https://www.youtube.com/watch?v=4k23j2QHwrM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | +| April 2019 | ADR-036 - Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=TW2xC1LwEkE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | +| April 2019 | Verifying Distributed Algorithms | [YouTube](https://www.youtube.com/watch?v=tMd4lgPVBxE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | +| April 2019 | Byzantine Model Checker Presentation | [YouTube](https://www.youtube.com/watch?v=rdXl4VCQyow&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | +| January 2019 | Proposer Selection in Idris | [YouTube](https://www.youtube.com/watch?v=hWZdc9c1aH8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | +| January 2019 | Current Mempool Design | [YouTube](https://www.youtube.com/watch?v=--iGIYYiLu4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | +| December 2018 | ABCI Proxy App | [YouTube](https://www.youtube.com/watch?v=s6sQ2HOVHdo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | +| October 2018 | DB Performance | [YouTube](https://www.youtube.com/watch?v=jVSNHi4l0fQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | +| October 2018 | Alternative Mempool Algorithms | [YouTube](https://www.youtube.com/watch?v=XxH5ZtM4vMM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | +| October 2018 | Tendermint Termination | [YouTube](https://www.youtube.com/watch?v=YBZjecfjeIk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 4b21a4b2d..c426b8cf9 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ 
-15,7 +15,7 @@ mkdir -p $GOPATH/src/github.com/tendermint cd $GOPATH/src/github.com/tendermint git clone https://github.com/tendermint/tendermint.git cd tendermint -make get_tools +make tools make install_abci ``` diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index eff70db68..8b97338d9 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -27,7 +27,7 @@ Then run ``` go get github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint -make get_tools +make tools make install_abci ``` diff --git a/docs/architecture/adr-025-commit.md b/docs/architecture/adr-025-commit.md index 6db039d43..8f68662a8 100644 --- a/docs/architecture/adr-025-commit.md +++ b/docs/architecture/adr-025-commit.md @@ -5,7 +5,8 @@ Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data. It contains a list of precommits from every validator, where the precommit includes the whole `Vote` structure. Thus each of the commit height, round, -type, and blockID are repeated for every validator, and could be deduplicated. +type, and blockID are repeated for every validator, and could be deduplicated, +leading to very significant savings in block size. ``` type Commit struct { @@ -24,21 +25,40 @@ type Vote struct { Signature []byte `json:"signature"` } ``` -References: -[#1648](https://github.com/tendermint/tendermint/issues/1648) -[#2179](https://github.com/tendermint/tendermint/issues/2179) -[#2226](https://github.com/tendermint/tendermint/issues/2226) -## Proposed Solution +The original tracking issue for this is [#1648](https://github.com/tendermint/tendermint/issues/1648). +We have discussed replacing the `Vote` type in `Commit` with a new `CommitSig` +type, which includes at minimum the vote signature. The `Vote` type will +continue to be used in the consensus reactor and elsewhere. -We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself. +A primary question is what should be included in the `CommitSig` beyond the +signature. One current constraint is that we must include a timestamp, since +this is how we calculate BFT time, though we may be able to change this [in the +future](https://github.com/tendermint/tendermint/issues/2840). + +Other concerns here include: + +- Validator Address [#3596](https://github.com/tendermint/tendermint/issues/3596) - + Should the CommitSig include the validator address? It is very convenient to + do so, but likely not necessary. This was also discussed in [#2226](https://github.com/tendermint/tendermint/issues/2226). +- Absent Votes [#3591](https://github.com/tendermint/tendermint/issues/3591) - + How to represent absent votes? Currently they are just present as `nil` in the + Precommits list, which is actually problematic for serialization +- Other BlockIDs [#3485](https://github.com/tendermint/tendermint/issues/3485) - + How to represent votes for nil and for other block IDs?
We currently allow + votes for nil and votes for alternative block ids, but just ignore them + + +## Decision + +Deduplicate the fields and introduce `CommitSig`: ``` type Commit struct { Height int64 Round int BlockID BlockID `json:"block_id"` - Precommits []*CommitSig `json:"precommits"` + Precommits []CommitSig `json:"precommits"` } type CommitSig struct { @@ -60,19 +80,54 @@ const ( ``` -Note the need for an extra byte to indicate whether the signature is for the BlockID or for nil. -This byte can also be used to indicate an absent vote, rather than using a nil object like we currently do, -which has been [problematic for compatibility between Amino and proto3](https://github.com/tendermint/go-amino/issues/260). - -Note we also continue to store the `ValidatorAddress` in the `CommitSig`. -While this still takes 20-bytes per signature, it ensures that the Commit has all -information necessary to reconstruct Vote, which simplifies mapping between Commit and Vote objects -and with debugging. It also may be necessary for the light-client to know which address a signature corresponds to if -it is trying to verify a current commit with an older validtor set. +Re the concerns outlined in the context: + +**Timestamp**: Leave the timestamp for now. Removing it and switching to +proposer based time will take more analysis and work, and will be left for a +future breaking change. In the meantime, the concerns with the current approach to +BFT time [can be +mitigated](https://github.com/tendermint/tendermint/issues/2840#issuecomment-529122431). + +**ValidatorAddress**: we include it in the `CommitSig` for now. While this +does increase the block size unnecessarily (20-bytes per validator), it has some ergonomic and debugging advantages: + +- `Commit` contains everything necessary to reconstruct `[]Vote`, and doesn't depend on additional access to a `ValidatorSet` +- Lite clients can check if they know the validators in a commit without + re-downloading the validator set +- Easy to see directly in a commit which validators signed what without having + to fetch the validator set + +If and when we change the `CommitSig` again, for instance to remove the timestamp, +we can reconsider whether the ValidatorAddress should be removed. + +**Absent Votes**: we include absent votes explicitly with no Signature or +Timestamp but with the ValidatorAddress. This should resolve the serialization +issues and make it easy to see which validator's votes failed to be included. + +**Other BlockIDs**: We use a single byte to indicate which blockID a `CommitSig` +is for. The only options are: + - `Absent` - no vote received from this validator, so no signature + - `Nil` - validator voted Nil - meaning they did not see a polka in time + - `Commit` - validator voted for this block + +Note this means we don't allow votes for any other blockIDs. If a signature is +included in a commit, it is either for nil or the correct blockID. According to +the Tendermint protocol and assumptions, there is no way for a correct validator to +precommit for a conflicting blockID in the same round an actual commit was +created. This was the consensus from +[#3485](https://github.com/tendermint/tendermint/issues/3485). + +We may want to consider supporting other blockIDs later, as a way to capture +evidence that might be helpful. We should clarify if/when/how doing so would
+To implement it, we could change the `Commit.BlockID` field to a slice, where
+the first entry is the correct block ID and the other entries are other BlockIDs
+that validators precommitted before. The BlockIDFlag enum can be extended to
+represent these additional block IDs on a per-block basis.
 ## Status
-Proposed
+Accepted
 ## Consequences
diff --git a/docs/architecture/adr-042-state-sync.md b/docs/architecture/adr-042-state-sync.md
new file mode 100644
index 000000000..d525a4974
--- /dev/null
+++ b/docs/architecture/adr-042-state-sync.md
@@ -0,0 +1,239 @@
+# ADR 042: State Sync Design
+
+## Changelog
+
+2019-06-27: Init by EB
+2019-07-04: Follow up by brapse
+
+## Context
+StateSync is a feature which would allow a new node to receive a
+snapshot of the application state without downloading blocks or going
+through consensus. Once downloaded, the node could switch to FastSync
+and eventually participate in consensus. The goal of StateSync is to
+facilitate setting up a new node as quickly as possible.
+
+## Considerations
+Because Tendermint doesn't know anything about the application state,
+StateSync will broker messages between nodes and through
+the ABCI to an opaque application. The implementation will have multiple
+touch points on both the Tendermint code base and the ABCI application.
+
+* A StateSync reactor to facilitate peer communication - Tendermint
+* A set of ABCI messages to transmit application state to the reactor - Tendermint
+* A set of MultiStore APIs for exposing snapshot data to the ABCI - ABCI application
+* A storage format with validation and performance considerations - ABCI application
+
+### Implementation Properties
+Beyond the approach, any implementation of StateSync can be evaluated
+across different criteria:
+
+* Speed: Expected throughput of producing and consuming snapshots
+* Safety: Cost of pushing invalid snapshots to a node
+* Liveness: Cost of preventing a node from receiving/constructing a snapshot
+* Effort: How much effort does an implementation require
+
+### Implementation Questions
+* What is the format of a snapshot?
+  * Complete snapshot
+  * Ordered IAVL key ranges
+  * Individually compressed chunks which can be validated
+* How is data validated?
+  * Trust a peer with its data blindly
+  * Trust a majority of peers
+  * Use light client validation to validate each chunk against a
+    consensus-produced merkle tree root
+* What are the performance characteristics?
+  * Random vs sequential reads
+  * How parallelizable is the scheduling algorithm
+
+### Proposals
+Broadly speaking there are two approaches to this problem which have had
+varying degrees of discussion and progress. These approaches can be
+summarized as:
+
+**Lazy:** Where snapshots are produced dynamically at request time. This
+solution would use the existing data structure.
+**Eager:** Where snapshots are produced periodically and served from disk at
+request time. This solution would create an auxiliary data structure
+optimized for batch read/writes.
+
+Additionally, the proposals tend to vary on how they provide safety
+properties.
+
+**LightClient** Where a client can acquire the merkle root from the block
+headers synchronized from a trusted validator set. Subsets of the application
+state, called chunks, can therefore be validated on receipt to ensure each chunk
+is part of the merkle root.
+
+**Majority of Peers** Where manifests of chunks along with checksums are
+downloaded and compared against versions provided by a majority of
+peers.
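+
+To make the trade-off between the two safety models concrete, the following is a
+minimal Go sketch of the majority-of-peers approach: a hypothetical `Manifest`
+type carries per-chunk checksums, and every downloaded chunk is checked against
+it before being handed to the application. The type and function names are
+illustrative only and are not part of any Tendermint API; note that under this
+model the manifest itself is only as trustworthy as its source, which is exactly
+the concern discussed above.
+
+```go
+package statesync
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+)
+
+// Manifest lists the expected checksum of every chunk in a snapshot. In the
+// majority-of-peers model the manifest itself must come from a trusted source
+// or be cross-checked against many peers.
+type Manifest struct {
+	Height    int64
+	ChunkSums [][]byte // sha256 of each chunk, indexed by chunk number
+}
+
+// VerifyChunk checks a downloaded chunk against the manifest entry for its
+// index before the chunk is applied.
+func VerifyChunk(m *Manifest, index int, chunk []byte) error {
+	if index < 0 || index >= len(m.ChunkSums) {
+		return errors.New("chunk index out of range")
+	}
+	sum := sha256.Sum256(chunk)
+	if !bytes.Equal(sum[:], m.ChunkSums[index]) {
+		return errors.New("chunk checksum does not match manifest")
+	}
+	return nil
+}
+```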
+
+#### Lazy StateSync
+An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier.
+In this design, the state has a given `size` of primitive elements (like
+keys or nodes), each element is assigned a number from 0 to `size-1`,
+and chunks consist of a range of such elements. Ackratos raised
+[some concerns](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit)
+about this design, somewhat specific to the IAVL tree, and mainly concerning
+performance of random reads and of iterating through the tree to determine element numbers
+(i.e. elements aren't indexed by element number).
+
+An alternative design was suggested by Jae Kwon in
+[#3639](https://github.com/tendermint/tendermint/issues/3639) where chunking
+happens lazily and in a dynamic way: nodes request key ranges from their peers,
+and peers respond with some subset of the
+requested range and with notes on how to request the rest in parallel from other
+peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the
+range are omitted, proofs for the range will fail to verify.
+This way a node can start by requesting the entire tree from one peer,
+and that peer can respond with, say, the first few keys and the ranges to request
+from other peers.
+
+Additionally, per-chunk validation tends to come more naturally to the
+Lazy approach since it tends to use the existing structure of the tree
+(i.e. keys or nodes) rather than state-sync specific chunks. Such a
+design for Tendermint was originally tracked in
+[#828](https://github.com/tendermint/tendermint/issues/828).
+
+#### Eager StateSync
+Warp Sync, as implemented in Parity (see ["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html)),
+is used to rapidly download both blocks and state snapshots from peers. Data is
+carved into ~4MB chunks and snappy-compressed. Hashes of snappy-compressed chunks
+are stored in a manifest file which coordinates the state sync. Obtaining a correct
+manifest file seems to require an honest majority of peers. This means you may not
+find out the state is incorrect until you download the whole thing and compare it
+with a verified block header.
+
+A similar solution was implemented by Binance in
+[#3594](https://github.com/tendermint/tendermint/pull/3594)
+based on their initial implementation in
+[PR #3243](https://github.com/tendermint/tendermint/pull/3243)
+and [some learnings](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit).
+Note this still requires the honest-majority peer assumption.
+
+As an eager protocol, warp-sync can efficiently compress larger, more
+predictable chunks once per snapshot and service many new peers. By
+comparison, lazy chunkers would have to compress each chunk at request
+time.
+
+### Analysis of Lazy vs Eager
+The Lazy and Eager approaches have more in common than they differ. Both require
+reactors on the Tendermint side, a set of ABCI messages, and a method for
+serializing/deserializing snapshots facilitated by a SnapshotFormat.
+
+The biggest difference between Lazy and Eager proposals is in the
+read/write patterns necessitated by serving a snapshot chunk.
+Specifically, Lazy State Sync performs random reads to the underlying data
+structure while Eager can optimize for sequential reads.
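+
+As a rough illustration of the read-pattern difference (the `StateStore`
+interface, `chunkPath`, and function names below are assumptions made for the
+sketch, not actual Tendermint code), a lazy server assembles a chunk at request
+time with one random lookup per key against the live store, while an eager
+server simply streams a pre-built chunk file from disk sequentially:
+
+```go
+package statesync
+
+import (
+	"io"
+	"os"
+)
+
+// StateStore is a stand-in for the application's key-value store.
+type StateStore interface {
+	Get(key []byte) ([]byte, error)
+}
+
+// lazyChunk serves a chunk at request time: every key in the requested range
+// is a separate, potentially random, read against the live store.
+func lazyChunk(store StateStore, keys [][]byte) ([][]byte, error) {
+	out := make([][]byte, 0, len(keys))
+	for _, k := range keys {
+		v, err := store.Get(k) // random access per key
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, v)
+	}
+	return out, nil
+}
+
+// eagerChunk serves a chunk that was written to disk when the snapshot was
+// taken: a single sequential read of a pre-compressed file.
+func eagerChunk(chunkPath string) ([]byte, error) {
+	f, err := os.Open(chunkPath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return io.ReadAll(f) // sequential access
+}
+```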
+
+This distinction between approaches was demonstrated by Binance's
+[ackratos](https://github.com/ackratos) in their implementation of
+[Lazy State sync](https://github.com/tendermint/tendermint/pull/3243), the
+[analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/)
+of its performance, and the follow-up implementation of
+[Warp Sync](http://github.com/tendermint/tendermint/pull/3594).
+
+#### Comparing Security Models
+There are several different security models which have been
+discussed/proposed in the past, but they generally fall into two categories.
+
+Light client validation: In which the node receiving data is expected to
+first perform a light client sync and have all the necessary block
+headers. With a trusted block header (trusted in the sense of coming from a
+validator set subject to [weak subjectivity](https://github.com/tendermint/tendermint/pull/3795)),
+the node can compare any subset of keys, called a chunk, against the merkle root.
+The advantage of light client validation is that the block headers are
+signed by validators which have something to lose for malicious
+behaviour. If a validator were to provide an invalid proof, they can be
+slashed.
+
+Majority of peer validation: A manifest file containing a list of chunks
+along with checksums of each chunk is downloaded from a
+trusted source. That source can be a community resource similar to
+[sum.golang.org](https://sum.golang.org) or downloaded from the majority
+of peers. One disadvantage of the majority-of-peers security model is the
+vulnerability to eclipse attacks, in which a malicious user looks to
+saturate a target node's peer list and produce a manufactured picture of a
+majority.
+
+A third option would be to include snapshot-related data in the
+block header. This could include the manifest with related checksums and be
+secured through consensus. One challenge of this approach is to
+ensure that creating snapshots does not put undue burden on block
+proposers by synchronizing snapshot creation and block creation. One
+approach to minimizing the burden is for snapshots for height
+`H` to be included in block `H+n`, where `n` is some number of blocks after `H`,
+giving the block proposer enough time to complete the snapshot
+asynchronously.
+
+## Proposal: Eager StateSync With Per Chunk Light Client Validation
+The conclusion after some consideration of the advantages/disadvantages of
+eager/lazy approaches and the different security models is to produce a state sync
+which eagerly produces snapshots and uses light client validation. This
+approach has the performance advantages of pre-computing efficient
+snapshots which can be streamed to new nodes on demand using sequential IO.
+Secondly, by using light client validation we can validate each chunk on
+receipt and avoid the potential eclipse attacks of majority-of-peers based
+security.
+
+### Implementation
+Tendermint is responsible for downloading and verifying chunks of
+AppState from peers. The ABCI application is responsible for taking
+AppStateChunk objects from TM and constructing a valid state tree whose
+root corresponds with the AppHash of the syncing block.
+In particular, we will need to implement:
+
+* A new StateSync reactor that brokers message transmission between the peers
+  and the ABCI application
+* A set of ABCI messages
+* Design SnapshotFormat as an interface which can:
+  * validate chunks
+  * read/write chunks from file
+  * read/write chunks to/from application state store
+  * convert manifests into chunkRequest ABCI messages
+* Implement SnapshotFormat for cosmos-hub with concrete implementation for:
+  * read/write chunks in a way which can be:
+    * parallelized across peers
+    * validated on receipt
+  * read/write to/from IAVL+ tree
+
+![StateSync Architecture Diagram](img/state-sync.png)
+
+## Implementation Path
+* Create StateSync reactor based on [#3753](https://github.com/tendermint/tendermint/pull/3753)
+* Design SnapshotFormat with an eye towards cosmos-hub implementation
+* ABCI message to send/receive SnapshotFormat
+* IAVL+ changes to support SnapshotFormat
+* Deliver Warp sync (no chunk validation)
+* Light client implementation for weak subjectivity
+* Deliver StateSync with chunk validation
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Neutral
+
+### Positive
+* Safe & performant state sync design substantiated with real-world implementation experience
+* General interfaces allowing application-specific innovation
+* Parallelizable implementation trajectory with reasonable engineering effort
+
+### Negative
+* Static scheduling lacks opportunity for real-time chunk availability optimizations
+
+## References
+[sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue
+[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal
+[tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal
+[proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation
+[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae proposal
+[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
+[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
+
+
diff --git a/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
new file mode 100644
index 000000000..066f68f7f
--- /dev/null
+++ b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
@@ -0,0 +1,141 @@
+# ADR 044: Lite Client with Weak Subjectivity
+
+## Changelog
+* 13-07-2019: Initial draft
+* 14-08-2019: Address cwgoes comments
+
+## Context
+
+The concept of light clients was introduced in the Bitcoin white paper. It
+describes a watcher of a distributed consensus process that only validates the
+consensus algorithm and not the state machine transactions within.
+
+Tendermint light clients allow bandwidth- & compute-constrained devices, such as
+smartphones, low-power embedded chips, or other blockchains, to
+efficiently verify the consensus of a Tendermint blockchain. This forms the
+basis of safe and efficient state synchronization for new network nodes and
+inter-blockchain communication (where a light client of one Tendermint instance
+runs in another chain's state machine).
+
+In a network that is expected to reliably punish validators for misbehavior
+by slashing bonded stake and where the validator set changes
+infrequently, clients can take advantage of this assumption to safely
+synchronize a lite client without downloading the intervening headers.
+
+Light clients (and full nodes) operating in the Proof of Stake context need a
+trusted block height from a trusted source that is no older than 1 unbonding
+window plus a configurable evidence submission synchrony bound. This is called “weak subjectivity”.
+
+Weak subjectivity is required in Proof of Stake blockchains because it is
+costless for an attacker to buy up voting keys that are no longer bonded and
+fork the network at some point in its prior history. See Vitalik’s post at
+[Proof of Stake: How I Learned to Love Weak
+Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).
+
+Currently, Tendermint provides a lite client implementation in the
+[lite](https://github.com/tendermint/tendermint/tree/master/lite) package. This
+lite client implements a bisection algorithm that tries to use a binary search
+to find the minimum number of block headers where the validator set voting
+power changes are less than 1/3. This interface does not support weak
+subjectivity at this time. The Cosmos SDK also does not support counterfactual
+slashing, nor does the lite client have any capacity to report evidence, making
+these systems *theoretically unsafe*.
+
+NOTE: Tendermint provides a somewhat different (stronger) light client model
+than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light
+client if they have two-thirds of the private keys from the last root-of-trust.
+
+## Decision
+
+### The Weak Subjectivity Interface
+
+Add the weak subjectivity interface for when a new light client connects to the
+network or when a light client that has been offline for longer than the
+unbonding period connects to the network. Specifically, the node needs to
+initialize the following structure before syncing from user input:
+
+```
+type TrustOptions struct {
+    // Required: only trust commits up to this age.
+    // Should be equal to the unbonding period minus some delta for evidence reporting.
+    TrustPeriod time.Duration `json:"trust-period"`
+
+    // Option 1: TrustHeight and TrustHash can both be provided
+    // to force the trusting of a particular height and hash.
+    // If the latest trusted height/hash is more recent, then this option is
+    // ignored.
+    TrustHeight int64  `json:"trust-height"`
+    TrustHash   []byte `json:"trust-hash"`
+
+    // Option 2: Callback can be set to implement a confirmation
+    // step if the trust store is uninitialized, or expired.
+    Callback func(height int64, hash []byte) error
+}
+```
+
+The expectation is the user will get this information from a trusted source
+like a validator, a friend, or a secure website. A more user-friendly
+solution with trust tradeoffs is that we establish an HTTPS-based protocol with
+a default endpoint that populates this information. Also, an on-chain registry
+of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the future.
+
+### Linear Verification
+
+The linear verification algorithm requires downloading all headers
+between the `TrustHeight` and the `LatestHeight`.
+The lite client downloads the full header for the provided `TrustHeight` and
+then proceeds to download `N+1` headers and applies the
+[Tendermint validation rules](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#validation)
+to each block.
+
+### Bisecting Verification
+
+Bisecting verification is a more complex mechanism that, in the most optimistic
+case, requires a light client to download only two block headers to come into
+synchronization.
+
+The bisection algorithm proceeds in the following fashion. The client downloads
+and verifies the full block header for `TrustHeight` and then fetches the
+`LatestHeight` block header. The client then verifies the `LatestHeight`
+header. Finally, the client attempts to verify the `LatestHeight` header with
+voting powers taken from `NextValidatorSet` in the `TrustHeight` header. This
+verification will succeed if the validators from `TrustHeight` still have more
+than 2/3 of the voting power in the `LatestHeight`. If this succeeds, the client
+is fully synchronized. If this fails, then the following bisection algorithm
+should be executed.
+
+The client tries to download the block header at the midpoint between
+`LatestHeight` and `TrustHeight` and applies the same algorithm as above,
+using `MidPointHeight` instead of `LatestHeight` and a different threshold -
+more than 1/3 of the voting power for *non-adjacent headers*. In the case of
+failure, recursively perform the `MidPoint` verification until success, then
+start over with an updated `NextValidatorSet` and `TrustHeight`.
+
+If the client encounters a forged header, it should submit the header along
+with some other intermediate headers as the evidence of misbehavior to other
+full nodes. After that, it can retry the bisection using another full node. An
+optimal client will cache trusted headers from the previous run to minimize
+network usage.
+
+---
+
+Check out the formal specification
+[here](https://github.com/tendermint/tendermint/blob/master/docs/spec/consensus/light-client.md).
+
+## Status
+
+Accepted.
+
+## Consequences
+
+### Positive
+
+* light client which is safe to use (it can go offline, but not for too long)
+
+### Negative
+
+* complexity of bisection
+
+### Neutral
+
+* social consensus can be prone to errors (for cases where a new light client
+  joins a network or it has been offline for too long)
diff --git a/docs/architecture/img/state-sync.png b/docs/architecture/img/state-sync.png
new file mode 100644
index 000000000..08b6eac43
Binary files /dev/null and b/docs/architecture/img/state-sync.png differ
diff --git a/docs/guides/go-built-in.md b/docs/guides/go-built-in.md
index 705022c90..96adaf885 100644
--- a/docs/guides/go-built-in.md
+++ b/docs/guides/go-built-in.md
@@ -448,6 +448,12 @@ defer db.Close()
 app := NewKVStoreApplication(db)
 ```
+For **Windows** users, restarting this app will make Badger throw an error, as it requires the value log to be truncated. For more information on this, visit [here](https://github.com/dgraph-io/badger/issues/744).
+This can be avoided by setting the truncate option to true, like this:
+```go
+db, err := badger.Open(badger.DefaultOptions("/tmp/badger").WithTruncate(true))
+```
+
 Then we use it to create a Tendermint Core `Node` instance:
 ```go
diff --git a/docs/guides/go.md b/docs/guides/go.md
index ada84adfc..3798c9f5e 100644
--- a/docs/guides/go.md
+++ b/docs/guides/go.md
@@ -388,6 +388,12 @@ defer db.Close()
 app := NewKVStoreApplication(db)
 ```
+For **Windows** users, restarting this app will make Badger throw an error, as it requires the value log to be truncated. For more information on this, visit [here](https://github.com/dgraph-io/badger/issues/744).
+This can be avoided by setting the truncate option to true, like this:
+```go
+db, err := badger.Open(badger.DefaultOptions("/tmp/badger").WithTruncate(true))
+```
+
 Then we start the ABCI server and add some signal handling to gracefully stop it
 upon receiving SIGTERM or Ctrl-C. Tendermint Core will act as a client,
 which connects to our server and send us transactions and other messages.
diff --git a/docs/guides/java.md b/docs/guides/java.md
new file mode 100644
index 000000000..162b40fd7
--- /dev/null
+++ b/docs/guides/java.md
@@ -0,0 +1,600 @@
+# Creating an application in Java
+
+## Guide Assumptions
+
+This guide is designed for beginners who want to get started with a Tendermint
+Core application from scratch. It does not assume that you have any prior
+experience with Tendermint Core.
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine (your application) - written in any programming language - and securely
+replicates it on many machines.
+
+By following along with this guide, you'll create a Tendermint Core project
+called kvstore, a (very) simple distributed BFT key-value store. The application
+(which implements the blockchain interface (ABCI)) will be written in Java.
+
+This guide assumes that you are not new to the JVM world. If you are, please see the [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and the [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html).
+
+## Built-in app vs external app
+
+If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance.
+[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
+Please refer to the [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details.
+
+If you choose another language, like we did in this guide, you have to write a separate app,
+which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC.
+This guide will show you how to build an external application using an RPC server.
+
+Having a separate application might give you better security guarantees as two
+processes would be communicating via an established binary protocol. Tendermint
+Core will not have access to the application's state.
+
+## 1.1 Installing Java and Gradle
+
+Please refer to [Oracle's guide for installing the JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
+
+Verify that you have installed Java successfully:
+
+```sh
+$ java -version
+java version "12.0.2" 2019-07-16
+Java(TM) SE Runtime Environment (build 12.0.2+10)
+Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing)
+```
+
+You can choose any version of Java greater than or equal to 8.
+This guide is written using Java SE Development Kit 12. + +Make sure you have `$JAVA_HOME` environment variable set: + +```sh +$ echo $JAVA_HOME +/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home +``` + +For Gradle installation, please refer to [their official guide](https://gradle.org/install/). + +## 1.2 Creating a new Java project + +We'll start by creating a new Gradle project. + +```sh +$ export KVSTORE_HOME=~/kvstore +$ mkdir $KVSTORE_HOME +$ cd $KVSTORE_HOME +``` + +Inside the example directory run: +```sh +gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit +``` +This will create a new project for you. The tree of files should look like: +```sh +$ tree +. +|-- build.gradle +|-- gradle +| `-- wrapper +| |-- gradle-wrapper.jar +| `-- gradle-wrapper.properties +|-- gradlew +|-- gradlew.bat +|-- settings.gradle +`-- src + |-- main + | |-- java + | | `-- io + | | `-- example + | | `-- App.java + | `-- resources + `-- test + |-- java + | `-- io + | `-- example + | `-- AppTest.java + `-- resources +``` + +When run, this should print "Hello world." to the standard output. + +```sh +$ ./gradlew run +> Task :run +Hello world. +``` + +## 1.3 Writing a Tendermint Core application + +Tendermint Core communicates with the application through the Application +BlockChain Interface (ABCI). All message types are defined in the [protobuf +file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +This allows Tendermint Core to run applications written in any programming +language. + +### 1.3.1 Compile .proto files + +Add the following piece to the top of the `build.gradle`: +```groovy +buildscript { + repositories { + mavenCentral() + } + dependencies { + classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' + } +} +``` + +Enable the protobuf plugin in the `plugins` section of the `build.gradle`: +```groovy +plugins { + id 'com.google.protobuf' version '0.8.8' +} +``` + +Add the following code to `build.gradle`: +```groovy +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:3.7.1" + } + plugins { + grpc { + artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + } +} +``` + +Now we should be ready to compile the `*.proto` files. 
+ + +Copy the necessary `.proto` files to your project: +```sh +mkdir -p \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \ + $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto + +cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto +cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto +cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ + $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto +``` + +Add these dependencies to `build.gradle`: +```groovy +dependencies { + implementation 'io.grpc:grpc-protobuf:1.22.1' + implementation 'io.grpc:grpc-netty-shaded:1.22.1' + implementation 'io.grpc:grpc-stub:1.22.1' +} +``` + +To generate all protobuf-type classes run: +```sh +./gradlew generateProto +``` +To verify that everything went smoothly, you can inspect the `build/generated/` directory: +```sh +$ tree build/generated/ +build/generated/ +|-- source +| `-- proto +| `-- main +| |-- grpc +| | `-- types +| | `-- ABCIApplicationGrpc.java +| `-- java +| |-- com +| | `-- google +| | `-- protobuf +| | `-- GoGoProtos.java +| |-- common +| | `-- Types.java +| |-- merkle +| | `-- Merkle.java +| `-- types +| `-- Types.java +``` + +### 1.3.2 Implementing ABCI + +The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file +contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. + +Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content: +```java +package io.example; + +import io.grpc.stub.StreamObserver; +import types.ABCIApplicationGrpc; +import types.Types.*; + +class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { + + // methods implementation + +} +``` + +Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding +required business logic. + +### 1.3.3 CheckTx + +When a new transaction is added to the Tendermint Core, it will ask the +application to check it (validate the format, signatures, etc.). 
+ +```java +@Override +public void checkTx(RequestCheckTx req, StreamObserver responseObserver) { + var tx = req.getTx(); + int code = validate(tx); + var resp = ResponseCheckTx.newBuilder() + .setCode(code) + .setGasWanted(1) + .build(); + responseObserver.onNext(resp); + responseObserver.onCompleted(); +} + +private int validate(ByteString tx) { + List parts = split(tx, '='); + if (parts.size() != 2) { + return 1; + } + byte[] key = parts.get(0); + byte[] value = parts.get(1); + + // check if the same key=value already exists + var stored = getPersistedValue(key); + if (stored != null && Arrays.equals(stored, value)) { + return 2; + } + + return 0; +} + +private List split(ByteString tx, char separator) { + var arr = tx.toByteArray(); + int i; + for (i = 0; i < tx.size(); i++) { + if (arr[i] == (byte)separator) { + break; + } + } + if (i == tx.size()) { + return Collections.emptyList(); + } + return List.of( + tx.substring(0, i).toByteArray(), + tx.substring(i + 1).toByteArray() + ); +} +``` + +Don't worry if this does not compile yet. + +If the transaction does not have a form of `{bytes}={bytes}`, we return `1` +code. When the same key=value already exist (same key and value), we return `2` +code. For others, we return a zero code indicating that they are valid. + +Note that anything with non-zero code will be considered invalid (`-1`, `100`, +etc.) by Tendermint Core. + +Valid transactions will eventually be committed given they are not too big and +have enough gas. To learn more about gas, check out ["the +specification"](https://tendermint.com/docs/spec/abci/apps.html#gas). + +For the underlying key-value store we'll use +[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. + +`build.gradle`: +```groovy +dependencies { + implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' +} +``` + +```java +... +import jetbrains.exodus.ArrayByteIterable; +import jetbrains.exodus.ByteIterable; +import jetbrains.exodus.env.Environment; +import jetbrains.exodus.env.Store; +import jetbrains.exodus.env.StoreConfig; +import jetbrains.exodus.env.Transaction; + +class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { + private Environment env; + private Transaction txn = null; + private Store store = null; + + KVStoreApp(Environment env) { + this.env = env; + } + + ... + + private byte[] getPersistedValue(byte[] k) { + return env.computeInReadonlyTransaction(txn -> { + var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); + ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k)); + if (byteIterable == null) { + return null; + } + return byteIterable.getBytesUnsafe(); + }); + } +} +``` + +### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit + +When Tendermint Core has decided on the block, it's transferred to the +application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and +`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the +responses are expected to come in order. + +```java +@Override +public void beginBlock(RequestBeginBlock req, StreamObserver responseObserver) { + txn = env.beginTransaction(); + store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); + var resp = ResponseBeginBlock.newBuilder().build(); + responseObserver.onNext(resp); + responseObserver.onCompleted(); +} +``` +Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. 
+ +```java +@Override +public void deliverTx(RequestDeliverTx req, StreamObserver responseObserver) { + var tx = req.getTx(); + int code = validate(tx); + if (code == 0) { + List parts = split(tx, '='); + var key = new ArrayByteIterable(parts.get(0)); + var value = new ArrayByteIterable(parts.get(1)); + store.put(txn, key, value); + } + var resp = ResponseDeliverTx.newBuilder() + .setCode(code) + .build(); + responseObserver.onNext(resp); + responseObserver.onCompleted(); +} +``` + +If the transaction is badly formatted or the same key=value already exist, we +again return the non-zero code. Otherwise, we add it to the store. + +In the current design, a block can include incorrect transactions (those who +passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer +directly). This is done for performance reasons. + +Note we can't commit transactions inside the `DeliverTx` because in such case +`Query`, which may be called in parallel, will return inconsistent data (i.e. +it will report that some value already exist even when the actual block was not +yet committed). + +`Commit` instructs the application to persist the new state. + +```java +@Override +public void commit(RequestCommit req, StreamObserver responseObserver) { + txn.commit(); + var resp = ResponseCommit.newBuilder() + .setData(ByteString.copyFrom(new byte[8])) + .build(); + responseObserver.onNext(resp); + responseObserver.onCompleted(); +} +``` + +### 1.3.5 Query + +Now, when the client wants to know whenever a particular key/value exist, it +will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call +the application's `Query` method. + +Applications are free to provide their own APIs. But by using Tendermint Core +as a proxy, clients (including [light client +package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage +the unified API across different applications. Plus they won't have to call the +otherwise separate Tendermint Core API for additional proofs. + +Note we don't include a proof here. + +```java +@Override +public void query(RequestQuery req, StreamObserver responseObserver) { + var k = req.getData().toByteArray(); + var v = getPersistedValue(k); + var builder = ResponseQuery.newBuilder(); + if (v == null) { + builder.setLog("does not exist"); + } else { + builder.setLog("exists"); + builder.setKey(ByteString.copyFrom(k)); + builder.setValue(ByteString.copyFrom(v)); + } + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); +} +``` + +The complete specification can be found +[here](https://tendermint.com/docs/spec/abci/). + +## 1.4 Starting an application and a Tendermint Core instances + +Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file: + +```java +package io.example; + +import jetbrains.exodus.env.Environment; +import jetbrains.exodus.env.Environments; + +import java.io.IOException; + +public class App { + public static void main(String[] args) throws IOException, InterruptedException { + try (Environment env = Environments.newInstance("tmp/storage")) { + var app = new KVStoreApp(env); + var server = new GrpcServer(app, 26658); + server.start(); + server.blockUntilShutdown(); + } + } +} +``` + +It is the entry point of the application. +Here we create a special object `Environment`, which knows where to store the application state. +Then we create and start the gRPC server to handle Tendermint Core requests. 
+ +Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content: +```java +package io.example; + +import io.grpc.BindableService; +import io.grpc.Server; +import io.grpc.ServerBuilder; + +import java.io.IOException; + +class GrpcServer { + private Server server; + + GrpcServer(BindableService service, int port) { + this.server = ServerBuilder.forPort(port) + .addService(service) + .build(); + } + + void start() throws IOException { + server.start(); + System.out.println("gRPC server started, listening on $port"); + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + System.out.println("shutting down gRPC server since JVM is shutting down"); + GrpcServer.this.stop(); + System.out.println("server shut down"); + })); + } + + private void stop() { + server.shutdown(); + } + + /** + * Await termination on the main thread since the grpc library uses daemon threads. + */ + void blockUntilShutdown() throws InterruptedException { + server.awaitTermination(); + } +} +``` + +## 1.5 Getting Up and Running + +To create a default configuration, nodeKey and private validator files, let's +execute `tendermint init`. But before we do that, we will need to install +Tendermint Core. + +```sh +$ rm -rf /tmp/example +$ cd $GOPATH/src/github.com/tendermint/tendermint +$ make install +$ TMHOME="/tmp/example" tendermint init + +I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json +I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json +I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json +``` + +Feel free to explore the generated files, which can be found at +`/tmp/example/config` directory. Documentation on the config can be found +[here](https://tendermint.com/docs/tendermint-core/configuration.html). + +We are ready to start our application: + +```sh +./gradlew run + +gRPC server started, listening on 26658 +``` + +Then we need to start Tendermint Core and point it to our application. Staying +within the application directory execute: + +```sh +$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 + +I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 +I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node +I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" +I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 +I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 +``` + +Now open another tab in your terminal and try sending a transaction: + +```sh +$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "check_tx": { + "gasWanted": "1" + }, + "deliver_tx": {}, + "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", + "height": "33" +} +``` + +Response should contain the height where this transaction was committed. 
+ +Now let's check if the given key now exists and its value: + +```sh +$ curl -s 'localhost:26657/abci_query?data="tendermint"' +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "response": { + "log": "exists", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3My" + } + } +} +``` + +`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. + +## Outro + +I hope everything went smoothly and your first, but hopefully not the last, +Tendermint Core application is up and running. If not, please [open an issue on +Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig +deeper, read [the docs](https://tendermint.com/docs/). + +The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java). diff --git a/docs/guides/kotlin.md b/docs/guides/kotlin.md index 8f462bd61..fa9e10b35 100644 --- a/docs/guides/kotlin.md +++ b/docs/guides/kotlin.md @@ -22,9 +22,9 @@ If you use Golang, you can run your app and Tendermint Core in the same process [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. -If you choose another language, like we did in this guide, you have to write a separate app using -either plain socket or gRPC. This guide will show you how to build external applicationg -using RPC server. +If you choose another language, like we did in this guide, you have to write a separate app, +which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. +This guide will show you how to build external application using RPC server. Having a separate application might give you better security guarantees as two processes would be communicating via established binary protocol. Tendermint @@ -34,7 +34,7 @@ Core will not have access to application's state. Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). -Verify that you have installed Java successully: +Verify that you have installed Java successfully: ```sh $ java -version @@ -69,7 +69,7 @@ Inside the example directory run: ```sh gradle init --dsl groovy --package io.example --project-name example --type kotlin-application ``` -That Gradle command will create project structure for you: +This will create a new project for you. The tree of files should look like: ```sh $ tree . @@ -114,7 +114,7 @@ language. ### 1.3.1 Compile .proto files -Add folowing to the top of `build.gradle`: +Add the following piece to the top of the `build.gradle`: ```groovy buildscript { repositories { @@ -126,14 +126,14 @@ buildscript { } ``` -Enable protobuf plugin in `plugins` section of `build.gradle`: +Enable the protobuf plugin in the `plugins` section of the `build.gradle`: ```groovy plugins { id 'com.google.protobuf' version '0.8.8' } ``` -Add following to `build.gradle`: +Add the following code to `build.gradle`: ```groovy protobuf { protoc { @@ -152,10 +152,10 @@ protobuf { } ``` -Now your project is ready to compile `*.proto` files. +Now we should be ready to compile the `*.proto` files. 
-Copy necessary .proto files to your project: +Copy the necessary `.proto` files to your project: ```sh mkdir -p \ $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \ @@ -173,7 +173,7 @@ cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto ``` -Add dependency to `build.gradle`: +Add these dependencies to `build.gradle`: ```groovy dependencies { implementation 'io.grpc:grpc-protobuf:1.22.1' @@ -186,7 +186,7 @@ To generate all protobuf-type classes run: ```sh ./gradlew generateProto ``` -It will produce java classes to `build/generated/`: +To verify that everything went smoothly, you can inspect the `build/generated/` directory: ```sh $ tree build/generated/ build/generated/ @@ -211,11 +211,10 @@ build/generated/ ### 1.3.2 Implementing ABCI -As you can see there is a generated file `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java`. -which contains an abstract class `ABCIApplicationImplBase`. This class fully describes the ABCI interface. -All you need is implement this interface. +The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file +contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. -Create file `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` with following context: +Create `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` file with the following content: ```kotlin package io.example @@ -296,7 +295,7 @@ For the underlying key-value store we'll use `build.gradle`: ```groovy dependencies { - implementation "org.jetbrains.xodus:xodus-environment:1.3.91" + implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' } ``` @@ -316,14 +315,21 @@ class KVStoreApp( private var store: Store? = null ... + + private fun getPersistedValue(k: ByteArray): ByteArray? { + return env.computeInReadonlyTransaction { txn -> + val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn) + store.get(txn, ArrayByteIterable(k))?.bytesUnsafe + } + } } ``` ### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit -When Tendermint Core has decided on the block, it's transfered to the +When Tendermint Core has decided on the block, it's transferred to the application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and -`EndBlock` in the end. DeliverTx are being transfered asynchronously, but the +`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the responses are expected to come in order. ```kotlin @@ -335,7 +341,7 @@ override fun beginBlock(req: RequestBeginBlock, responseObserver: StreamObserver responseObserver.onCompleted() } ``` -Here we start new transaction, which will store block's transactions, and open corresponding store. +Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. ```kotlin override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver) { @@ -355,10 +361,10 @@ override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver - val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn) - store.get(txn, ArrayByteIterable(k))?.bytesUnsafe - } -} ``` The complete specification can be found @@ -440,10 +439,10 @@ fun main() { ``` It is the entry point of the application. -Here we create special object `Environment` which knows where to store state of the application. 
-Then we create and srart gRPC server to handle Tendermint's requests. +Here we create a special object `Environment`, which knows where to store the application state. +Then we create and start the gRPC server to handle Tendermint Core requests. -Create file `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt`: +Create `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt` file with the following content: ```kotlin package io.example diff --git a/docs/introduction/install.md b/docs/introduction/install.md index 0a013bed1..39825dae3 100644 --- a/docs/introduction/install.md +++ b/docs/introduction/install.md @@ -28,7 +28,7 @@ cd tendermint ### Get Tools & Dependencies ``` -make get_tools +make tools ``` ### Compile diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md index 5d4a678d4..528f92634 100644 --- a/docs/spec/abci/apps.md +++ b/docs/spec/abci/apps.md @@ -66,7 +66,7 @@ After `Commit`, CheckTx is run again on all transactions that remain in the node's local mempool after filtering those included in the block. To prevent the mempool from rechecking all transactions every time a block is committed, set the configuration option `mempool.recheck=false`. As of Tendermint v0.32.1, -an additional `Type` parameter is made available to the CheckTx function that +an additional `Type` parameter is made available to the CheckTx function that indicates whether an incoming transaction is new (`CheckTxType_New`), or a recheck (`CheckTxType_Recheck`). @@ -211,7 +211,7 @@ message PubKey { The `pub_key` currently supports only one type: -- `type = "ed25519" and`data = ` +- `type = "ed25519"` and `data = ` The `power` is the new voting power for the validator, with the following rules: diff --git a/docs/spec/consensus/fork-accountability.md b/docs/spec/consensus/fork-accountability.md new file mode 100644 index 000000000..a6af8a1ba --- /dev/null +++ b/docs/spec/consensus/fork-accountability.md @@ -0,0 +1,319 @@ +# Fork accountability -- Problem statement and attacks + +## Problem Statement + +Tendermint consensus guarantees the following specifications for all heights: +* agreement -- no two correct full nodes decide differently. +* validity -- the decided block satisfies the predefined predicate *valid()*. +* termination -- all correct full nodes eventually decide, + +if the +faulty validators have at most 1/3 of voting power in the current validator set. In the case where this assumption +does not hold, each of the specification may be violated. + +The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain, can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed. + + +However, faulty nodes may forge blocks and try to convince users (lite clients) that the blocks had been correctly generated. In addition, Tendermint agreement might be violated in the case where more than 1/3 of the voting power belongs to faulty validators: Two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common) and each branch continues to generate blocks independently of the other. 
+
+We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and therefore incentivize validators to behave according to the protocol specification.
+
+**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (+ other protocols in the infrastructure; e.g., full nodes and the Lite Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Lite Client specification.
+
+**Q:** Should we distinguish between agreement for validators and agreement for full nodes? The case where all correct validators agree on a block, but a correct full node decides on a different block, seems to be slightly less severe than the case where two correct validators decide on different blocks. Still, if a contaminated full node becomes a validator, that may be problematic later on. Also, it is not clear how gossiping is impaired if a contaminated full node is on a different branch.
+
+*Remark.* In the case where more than 1/3 of the voting power belongs to faulty validators, validity and termination can also be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages.
+
+
+## The Misbehavior of Faulty Validators
+
+Forks are the result of faulty validators deviating from the protocol. In principle, several such deviations can be detected without a fork actually occurring:
+
+1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus.
+
+2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is a misbehavior (see the sketch after this list).
+
+3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v)=false*, this is misbehavior.
+
+*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks.
+
+4. amnesia: Tendermint consensus has a locking mechanism. If a validator has some value *v* locked, then it can only prevote/precommit for *v* or nil. Sending a prevote/precommit message for a different value *v'* (that is not nil) while holding a lock on value *v* is misbehavior.
+
+5. spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send a precommit. Faulty validators may send a precommit without having received the prevote messages.
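+
+As a rough sketch of what detecting the second kind of misbehavior (double
+signing) looks like in code - illustrative only, with a simplified `Vote` shape
+rather than the actual Tendermint evidence types - two votes from the same
+validator for the same height, round, and vote type but different block IDs are
+sufficient evidence:
+
+```go
+package accountability
+
+import "bytes"
+
+// Vote is a simplified stand-in for a signed consensus vote.
+type Vote struct {
+	ValidatorAddress []byte
+	Height           int64
+	Round            int
+	Type             byte // prevote or precommit
+	BlockID          []byte
+}
+
+// IsDoubleSign reports whether two votes constitute double signing: the same
+// validator signed two different block IDs for the same height, round, and
+// vote type.
+func IsDoubleSign(a, b Vote) bool {
+	return bytes.Equal(a.ValidatorAddress, b.ValidatorAddress) &&
+		a.Height == b.Height &&
+		a.Round == b.Round &&
+		a.Type == b.Type &&
+		!bytes.Equal(a.BlockID, b.BlockID)
+}
+```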
+
+
+Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if at most 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have more than 1/3 (or in some cases more than 2/3) of the voting power, attackers have an incentive not to misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document.
+
+
+## Two types of forks
+
+* Fork-Full. Two correct validators decide on different blocks for the same height. Since the next validator sets are also decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain.
+
+As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can also spread to lite clients. However, even without breaking this system invariant, lite clients can be subject to a fork:
+
+* Fork-Lite. All correct validators decide on the same block for height *h*, but faulty processes (validators or not) forge a different block for that height, in order to fool users (who use the lite client).
+
+
+# Attack scenarios
+
+## On-chain attacks
+
+### Equivocation (one round)
+
+There are several scenarios in which forks might happen. The first is double signing within a round.
+
+* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height *h*.
+
+
+### Flip-flopping
+
+Tendermint consensus implements a locking mechanism: If a correct validator *p* receives a proposal for value *v* and *2f + 1* prevotes for a value *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*.
+In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if, in a future round *r' > r*, the process receives a proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways:
+
+
+
+* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for a different value *id(v')* in a higher round *r' > r* without previously correctly unlocking value *v*. In this case, faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds.
+Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full).
+
+
+* F3. Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain.
+However, faulty validators may use the correct precommit messages from round *r* together with a posteriori generated faulty precommit messages for round *r* to forge a block for a value that was not decided on the main chain (Fork-Lite).
+
+
+
+
+
+## Off-chain attacks
+
+F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to lite clients.
+Similarly, without actually interfering with the main chain, we can have the following:
+
+* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) for heights in which they are not part of the validator set (on the main chain).
+
+* F5. Lunatic validator: faulty validators sign vote messages to support an (arbitrary) application state that is different from the application state that resulted from valid state transitions.
+
+## Types of victims
+
+We consider three types of potential attack victims:
+
+
+- FN: full node
+- LCS: lite client with sequential header verification
+- LCB: lite client with bisection-based header verification
+
+F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to lite clients.
+
+*Remark.* If full nodes take a branch different from the one taken by the validators, the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety, it is not a primary concern.
+
+F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected.
+
+In addition, without creating a fork on the main chain, lite clients can be contaminated by more than a third of validators that are faulty and sign a forged header.
+F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB, which does not necessarily know the complete prefix of headers (Fork-Lite), as it trusts a header that is signed by at least one correct validator (trusting period method).
+
+
+
+
+
+
+The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks, so they can corrupt the state of full nodes. Then, if a lite client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the lite client.
+
+F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled).
+
+
+| Attack | FN     | LCS    | LCB    |
+|:------:|:------:|:------:|:------:|
+| F1     | direct | FN     | FN     |
+| F2     | direct | FN     | FN     |
+| F3     | direct | FN     | FN     |
+| F4     |        |        | direct |
+| F5     |        |        | direct |
+
+
+
+**Q:** Lite clients are more vulnerable than full nodes, because the former only verify headers and do not execute transactions. What kind of certainty is gained by a full node that executes a transaction?
+
+As a full node verifies all transactions, it can only be
+contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching.
+
+
+
+
+## Detailed Attack Scenarios
+
+### Equivocation-based attacks
+
+In case of equivocation-based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same
+round of some height. This attack can be executed on both full nodes and lite clients. It requires more than 1/3 of the voting power to be executed.
+
+#### Scenario 1: Equivocation on the main chain
+
+Validators:
+* CA - a set of correct validators with less than 1/3 of the voting power
+* CB - a set of correct validators with less than 1/3 of the voting power
+* CA and CB are disjoint
+* F - a set of faulty validators with more than 1/3 of the voting power
+
+Observe that this setting violates the Tendermint failure model.
+
+Execution:
+
+* A faulty proposer proposes block A to CA
+* A faulty proposer proposes block B to CB
+* Validators from the sets CA and CB prevote for A and B, respectively.
+* Faulty validators from the set F prevote both for A and B.
+* The faulty prevote messages
+  - for A arrive at CA long before the B messages
+  - for B arrive at CB long before the A messages
+* Therefore, correct validators from the sets CA and CB will observe
+more than 2/3 of prevotes for A and B and precommit for A and B, respectively.
+* Faulty validators from the set F precommit both values A and B.
+* Thus, we have more than 2/3 commits for both A and B.
+
+Consequences:
+* Creating evidence of misbehavior is simple in this case, as we have multiple messages signed by the same faulty processes for different values in the same round.
+
+* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence.
+
+* This is an attack on the full node level (Fork-Full).
+* It also extends to lite clients.
+* For both we need a detection and recovery mechanism.
+
+#### Scenario 2: Equivocation to a lite client (LCS)
+
+
+Validators:
+* a set F of faulty validators with more than 2/3 of the voting power.
+
+Execution:
+* on the main chain, F behaves correctly
+* F coordinates to sign a block B that is different from the one on the main chain.
+* the lite client obtains B and trusts it, as it is signed by more than 2/3 of the voting power.
+
+Consequences:
+
+Once equivocation is used to attack a lite client, it opens space
+for different kinds of attacks, as the application state can be diverged in any direction. For example, the attacker can modify the validator set such that it contains only validators that do not have any stake bonded. Note that once a lite client is fooled by a fork, the attacker can change the application state and the validator set arbitrarily.
+
+In order to detect such an (equivocation-based) attack, the lite client would need to cross-check its state with some correct validator (or to obtain a hash of the state from the main chain using out-of-band channels).
+
+*Remark.* The lite client would be able to create evidence of misbehavior, but this would require pulling potentially a lot of data from correct full nodes. Maybe we need to figure out a different architecture in which a lite client that is attacked pushes all its data for the current unbonding period to a correct node, which inspects this data and submits the corresponding evidence. There are also architectures that assume a special role (sometimes called a fisherman) whose goal is to collect as much useful data from the network as possible, analyze it, and create evidence transactions. That functionality is outside the scope of this document.
+
+*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince the lite client about an arbitrary state. In case of LCB, where the security threshold is at its minimum, an attacker can arbitrarily modify the application state with more than 1/3 of the voting power, while in case of LCS it requires more than 2/3 of the voting power.
+
+### Flip-flopping: Amnesia-based attacks
+
+
+
+In case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for a different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used on both full nodes and lite clients.
+
+#### Scenario 3: At most 2/3 of faults
+
+Validators:
+
+* a set F of faulty validators with more than 1/3 but at most 2/3 of the voting power
+* a set C of correct validators
+
+Execution:
+
+* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the
+  voting power (containing correct and faulty validators).
+* All validators (correct and faulty) reach a round *r' > r*.
+* Some correct validators in C do not lock any value before round *r'*.
+* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*.
+* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B.
+
+*Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1), as they only vote once per round in the execution.
+
+Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing.
+
+If a lite client is attacked using this attack with more than 1/3 of the voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state that correct validators find acceptable: in the execution above, correct validators still find the value acceptable; however, the block the lite client trusts deviates from the one on the main chain.
+
+#### Scenario 4: More than 2/3 of faults
+
+In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change the application state.
+
+Validators:
+
+* a set F1 of faulty validators with more than 1/3 of the voting power
+* a set F2 of faulty validators with at most 1/3 of the voting power
+
+Execution:
+
+* Similar to Scenario 3 (however, messages by correct validators are not needed)
+* The faulty validators in F1 lock value A in round *r*
+* They sign a different value in follow-up rounds
+* F2 does not lock A in round *r*
+
+Consequences:
+
+* The validators in F1 will be detectable by the fork accountability mechanisms.
+* The validators in F2 cannot be detected using this mechanism.
+Only in case they signed something that conflicts with the application can this be used against them. Otherwise, they do not do anything incorrect.
+* This case is not covered by the report https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing as it only assumes at most 2/3 of faulty validators.
+
+**Q:** do we need to define a special kind of attack for the case where a validator signs an arbitrary state? It seems that detecting such an attack requires a different mechanism that would require, as evidence, a sequence of blocks that lead to that state. This might be very tricky to implement.
+
+### Back to the past
+
+In this kind of attack, faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and a delayed message. This kind of attack can be used against both full nodes and lite clients.
+
+#### Scenario 5:
+
+Validators:
+* C1 - a set of correct validators with 1/3 of the voting power
+* C2 - a set of correct validators with 1/3 of the voting power
+* C1 and C2 are disjoint
+* F - a set of faulty validators with 1/3 of the voting power
+* one additional faulty process *q*
+* F and *q* violate the Tendermint failure model.
+
+
+Execution:
+
+* in a round *r* of height *h*, C1 precommits a value A,
+* C2 precommits nil,
+* F does not send any message,
+* *q* precommits nil.
+* In some round *r' > r*, F and *q* and C2 commit some other value B different from A.
+* F and *q* "go back to the past" and sign precommit messages for value A in round *r*.
+* Together with the precommit messages of C1, this is sufficient for a commit for value A.
+
+
+Consequences:
+
+* Only a single faulty validator that previously precommitted nil committed equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of an amnesia attack. Detecting this kind of attack boils down to the mechanisms for equivocation and amnesia.
+
+**Q:** should we keep this as a separate kind of attack? It seems that equivocation, amnesia, and phantom validators are the only kinds of attack we need to support, and this gives us security also in other cases. This would not be surprising, as equivocation and amnesia are attacks that follow from the protocol, and the phantom attack is not really an attack on Tendermint but rather on the Proof of Stake module.
+
+### Phantom validators
+
+In case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and lite clients.
+
+#### Scenario 6:
+
+Validators:
+* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k*
+
+Execution:
+
+* There is a fork, and there exist two different headers for height *h + k*, with different validator sets:
+  - VS2 on the main chain
+  - forged header VS2', signed by F (and others)
+
+* a lite client trusts a header for height *h* (and the corresponding validator set VS1).
+* As part of bisection header verification, it verifies the header at height *h + k* with the new validator set VS2'.
+
+Consequences:
+
+* To detect this, a node needs to see both the forged header and the canonical header from the chain.
+* If this is the case, detecting this kind of attack is easy, as it just requires verifying whether processes are signing messages at heights in which they are not part of the validator set (see the sketch after the following remark).
+
+**Remark.** We can have phantom-validator-based attacks as a follow-up to an equivocation- or amnesia-based attack, where the forked state contains validators that are not part of the validator set on the main chain. In this case, they keep signing messages contributing to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used against a full node during the period of time in which it is eclipsed.
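+
+To make the detection criterion above concrete, here is a minimal sketch of the check a node could run once it has seen both the forged header and the canonical chain. The types and the `valsetAt` lookup are illustrative assumptions, not actual Tendermint APIs:
+
+```go
+// Validator is a simplified representation of a validator-set member.
+type Validator struct {
+    Address string
+}
+
+// isPhantomSigner reports whether the signer of a vote for the given height
+// is outside the canonical validator set of the main chain at that height.
+// valsetAt stands for a lookup of the main-chain validator set by height.
+func isPhantomSigner(voteHeight int64, signerAddress string,
+    valsetAt func(height int64) []Validator) bool {
+    for _, v := range valsetAt(voteHeight) {
+        if v.Address == signerAddress {
+            return false // the signer is a legitimate validator at this height
+        }
+    }
+    return true // the signer is not in the canonical validator set: phantom
+}
+```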
+
+### Lunatic validator
+
+A lunatic validator agrees to sign commit messages for an arbitrary application state. It is used to attack lite clients.
+Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by
+referring to the block before the one at the height in question.
+
+**Q:** can we say that in this case a validator ignores checking whether the proposed value is valid before voting for it?
diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md
index 4b683b9a6..18dc280a3 100644
--- a/docs/spec/consensus/light-client.md
+++ b/docs/spec/consensus/light-client.md
@@ -1,113 +1,329 @@
-# Light Client
-
-A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
-about the blockchain application. In this document we describe mechanisms that ensures that the Tendermint light client
-has the same level of security as Full Node processes (without being itself a Full Node).
-
-To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
-Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the
-voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
-core functionality of the light client is updating the current validator set, that is then used to verify the
-blockchain header, and further the corresponding Merkle proofs.
-
-For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
-Tendermint RPC:
-
-```golang
-Header(height int64) (SignedHeader, error) // returns signed header for the given height
-Validators(height int64) (ResultValidators, error) // returns validator set for the given height
-LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
-
-type SignedHeader struct {
- Header Header
- Commit Commit
- ValSetNumber int64
-}
+# Lite client
-type ResultValidators struct {
- BlockHeight int64
- Validators []Validator
- // time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight
- ValSetTime int64
-}
+A lite client is a process that connects to Tendermint full nodes and then tries to verify application data using the Merkle proofs.
+
+## Context of this document
+
+In order to make sure that full nodes have the incentive to follow the protocol, we have to address the following three issues:
+
+1) The lite client needs a method to verify headers it obtains from full nodes according to trust assumptions -- this document.
+
+2) The lite client must be able to connect to one correct full node to detect and report on failures in the trust assumptions (i.e., conflicting headers) -- a future document.
+
+3) In the event the trust assumption fails (i.e., a lite client is fooled by a conflicting header), the Tendermint fork accountability protocol must account for the evidence -- see #3840
+
+## Problem statement
+
+
+We assume that the lite client knows a (base) header *inithead* it trusts (by social consensus or because the lite client has decided to trust the header before). The goal is to check whether another header *newhead* can be trusted based on the data in *inithead*.
+
+The correctness of the protocol is based on the assumption that *inithead* was generated by an instance of Tendermint consensus. The term "trusting" above indicates that the correctness of the protocol depends on this assumption. It is the responsibility of the user who runs the lite client to make sure that the risk of trusting a corrupted/forged *inithead* is negligible.
+
+
+## Definitions
+
+### Data structures
+
+In the following, only the details of the data structures needed for this specification are given.
+
+ * header fields
+   - *height*
+   - *bfttime*: the chain time when the header (block) was generated
+   - *V*: validator set containing validators for this block.
+   - *NextV*: validator set for the next block.
+   - *commit*: evidence that the block with height *height* - 1 was committed by a set of validators (canonical commit). We will use ```signers(commit)``` to refer to the set of validators that committed the block.
+
+ * signed header fields: contains a header and a *commit* for the current header; a "seen commit". In the Tendermint consensus, the "canonical commit" is stored in header *height* + 1.
+
+ * For each header *h* it has locally stored, the lite client stores whether
+   it trusts *h*. We write *trust(h) = true*, if this is the case.
+
+ * Validator fields. We will write a validator as a tuple *(v,p)* such that
+  + *v* is the identifier (we assume identifiers are unique in each validator set)
+  + *p* is its voting power
+
+
+### Functions
+
+For the purpose of this lite client specification, we assume that the Tendermint Full Node exposes the following function over Tendermint RPC:
+```go
+ func Commit(height int64) (SignedHeader, error)
+   // returns signed header: header (with the fields from
+   // above) with Commit that includes signatures of
+   // validators that signed the header
+
+
+ type SignedHeader struct {
+   Header Header
+   Commit Commit
+ }
 ```
-We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is
-being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers
-the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
-as validator set init time.
-Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely,
-given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
-validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
-set), and then starting from the block `H+2`, it will be signed by the next validator set.
-
-Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
-names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
-clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to
-`valSetNumber+1`.
+
-
-Locally, light client manages the following state:
-
-```golang
-valSet []Validator // current validator set (last known and verified validator set)
-valSetNumber int64 // sequence number of the current validator set
-valSetHash []byte // hash of the current validator set
-valSetTime int64 // time when the current validator set is initialised
+### Definitions
+
+* *tp*: trusting period
+* for realtime *t*, the predicate *correct(v,t)* is true if the validator *v*
+  follows the protocol until time *t* (we will see about recovery later).
+
+
+
+
+### Tendermint Failure Model
+
+If a block *h* is generated at time *bfttime* (and this time is stored in the block), then a set of validators that hold more than 2/3 of the voting power in h.Header.NextV is correct until time h.Header.bfttime + tp.
+
+Formally,
+\[
+\sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p >
+2/3 \sum_{(v,p) \in h.Header.NextV} p
+\]
+
+*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time), while *bfttime* corresponds to the reading of the local clock of a validator (how this time is computed may change when the Tendermint consensus is modified). In this note, we assume that all clocks are synchronized to realtime. We can make this more precise eventually (incorporating clock drift, accuracy, precision, etc.). Right now, we consider this assumption sufficient, as clock synchronization (under NTP) is in the order of milliseconds and *tp* is in the order of weeks.
+
+*Remark*: This failure model might change to a hybrid version that takes heights into account in the future.
+
+The specification in this document considers an implementation of the lite client under this assumption. Issues like *counter-factual signing*, *fork accountability*, and *evidence submission* are mechanisms that justify this assumption by incentivizing validators to follow the protocol.
+If they don't, and we have more than 1/3 faults, safety may be violated. Our approach then is to *detect* these cases (after the fact), and take suitable repair actions (automatic and social). This is discussed in an upcoming document on "Fork accountability". (These safety violations include the lite client wrongly trusting a header, a fork in the blockchain, etc.)
+
+
+## Lite Client Trusting Spec
+
+The lite client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties:
+
+- Lite Client Completeness: If header *h* was correctly generated by an instance of Tendermint consensus (and its age is less than the trusting period), then the lite client should eventually set *trust(h)* to true.
+
+- Lite Client Accuracy: If header *h* was *not generated* by an instance of Tendermint consensus, then the lite client should never set *trust(h)* to true.
+
+*Remark*: If, in the course of the computation, the lite client obtains certainty that some headers were forged by adversaries (that is, were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior.
+
+*Remark*: In Completeness we use "eventually", while in practice *trust(h)* should be set to true before *h.Header.bfttime + tp*. If not, the block cannot be trusted because it is too old.
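+
+Both this remark and the next one reduce to a simple expiry check against the trusting period. A minimal sketch, assuming an illustrative, simplified Header type (only the fields needed here):
+
+```go
+import "time"
+
+type Header struct {
+    Height  int64
+    Bfttime time.Time // chain time at which the header was generated
+}
+
+// withinTrustingPeriod reports whether the failure model still covers
+// header h at wall-clock time now, i.e., whether now < h.Bfttime + tp.
+func withinTrustingPeriod(h Header, tp time.Duration, now time.Time) bool {
+    return now.Before(h.Bfttime.Add(tp))
+}
+```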
+
+*Remark*: If a header *h* is marked with *trust(h)*, but it is too old (its bfttime is more than *tp* ago), then the lite client should set *trust(h)* to false again.
+
+*Assumption*: Initially, the lite client has a header *inithead* that it trusts correctly, that is, *inithead* was correctly generated by the Tendermint consensus.
+
+To reason about the correctness, we may prove the following invariant.
+
+*Verification Condition: Lite Client Invariant.*
+ For each lite client *l* and each header *h*:
+if *l* has set *trust(h) = true*,
+  then validators that are correct until time *h.Header.bfttime + tp* have more than two thirds of the voting power in *h.Header.NextV*.
+
+ Formally,
+ \[
+ \sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p >
+ 2/3 \sum_{(v,p) \in h.Header.NextV} p
+ \]
+
+*Remark.* To prove the invariant, we will have to prove that the lite client only trusts headers that were correctly generated by Tendermint consensus; the formula above then follows from the Tendermint failure model.
+
+
+## High Level Solution
+
+Upon initialization, the lite client is given a header *inithead* it trusts (by
+social consensus). It is assumed that *inithead* satisfies the lite client invariant. (If *inithead* has been correctly generated by Tendermint consensus, the invariant follows from the Tendermint Failure Model.)
+
+When a lite client sees a signed new header *snh*, it has to decide whether to trust the new
+header. Trust can be obtained by (possibly a combination of) three methods.
+
+1. **Uninterrupted sequence of proof.** If a block is appended to the chain, where the last block
+is trusted (and properly committed by the old validator set in the next block),
+and the new block contains a new validator set, the new block is trusted if the lite client knows all headers in the prefix.
+Intuitively, a trusted validator set is assumed to only choose a new validator set that will obey the Tendermint Failure Model.
+
+2. **Trusting period.** Based on a trusted block *h*, and the lite client
+invariant, which ensures the fault assumption during the trusting period, we can check whether at least one validator that has been continuously correct from *h.Header.bfttime* until now has signed *snh*.
+If this is the case, similarly to above, the chosen validator set in *snh* does not violate the Tendermint Failure Model.
+
+3. **Bisection.** If a check according to the trusting period fails, the lite client can try to obtain a header *hp* whose height lies between *h* and *snh* in order to check whether *h* can be used to get trust for *hp*, and *hp* can be used to get trust for *snh*. If this is the case, we can trust *snh*; if not, we may continue recursively.
+
+## How to use it
+
+We consider the following use case:
+  the lite client wants to verify a header for some given height *k*. Thus:
+  - it requests the signed header for height *k* from a full node
+  - it tries to verify this header with the methods described here.
+
+This can be used in several settings:
+  - someone tells the lite client that application data that is relevant for it can be read in the block of height *k*.
+  - the lite client wants the latest state. It asks a full node for the current height, and uses the response for *k*.
+
+
+## Details
+
+*Assumptions*
+
+1. *tp < unbonding period*.
+2. *snh.Header.bfttime < now*
+3. *snh.Header.bfttime < h.Header.bfttime+tp*
+4. *trust(h)=true*
+
+
+**Observation 1.** If *h.Header.bfttime + tp > now*, we trust the old
+validator set *h.Header.NextV*.
+
+When we say we trust *h.Header.NextV* we do *not* trust that each individual validator in *h.Header.NextV* is correct, but we only trust the fact that at most 1/3 of them are faulty (more precisely, the faulty ones have at most 1/3 of the total voting power).
+
+
+
+### Functions
+
+The function *Bisection* checks whether to trust header *h2* based on the trusted header *h1*. It does so by calling
+the function *CheckSupport* in the process of
+bisection/recursion. *CheckSupport* implements the trusting period method and, for two adjacent headers (in terms of heights), it checks the uninterrupted sequence of proof.
+
+*Assumption*: In the following, we assume that *h2.Header.height > h1.Header.height*. We will quickly discuss the other case in the next section.
+
+We consider the following set-up:
+- the lite client communicates with one full node
+- the lite client locally stores all the signed headers it obtained (trusted or not). In the pseudocode below we write *Store(header)* for this.
+- If *Bisection* returns *false*, then the lite client has seen a forged header.
+  * However, it does not know which header(s) is/are the problematic one(s).
+  * In this case, the lite client can submit (some of) the headers it has seen as evidence. As the lite client communicates with one full node only when executing Bisection, there are two cases:
+    - the full node is faulty
+    - the full node is correct and there was a fork in Tendermint consensus. Header *h1* is from a different branch than the one taken by the full node. This case is not the focus of this document, but will be treated in the document on fork accountability.
+
+- the lite client must retry to retrieve correct headers from another full node
+  * it picks a new full node
+  * it restarts *Bisection*
+  * there might be optimizations; a lite client may not need to call *Commit(k)*, for a height *k* for which it already has a signed header it trusts.
+  * how to make sure that a lite client can communicate with a correct full node will be the focus of a separate document (recall Issue 3 from "Context of this document").
+
+**Auxiliary Functions.** We will use the function ```votingpower_in(V1,V2)``` to compute the voting power the validators in set V1 have according to their voting power in set V2;
+we will write ```totalVotingPower(V)``` for ```votingpower_in(V,V)```, which returns the total voting power in V.
+We further use the function ```signers(Commit)``` that returns the set of validators that signed the Commit. (An illustrative sketch of these helpers is given below, after the discussion of CheckSupport.)
+
+**CheckSupport.** The following function checks whether we can trust the header h2 based on header h1 following the trusting period method.
+
+```go
+ func CheckSupport(h1,h2,trustlevel) bool {
+   if h1.Header.bfttime + tp < now { // Observation 1
+     return false // old header was once trusted but it is expired
+   }
+   vp_all := totalVotingPower(h1.Header.NextV)
+     // total sum of the voting power of the validators in h1.Header.NextV
+
+   if h2.Header.height == h1.Header.height + 1 {
+     // specific check for adjacent headers; everything must be
+     // properly signed.
+     // also check that h2.Header.V == h1.Header.NextV
+     // Plus the following check that 2/3 of the voting power
+     // in h1 signed h2
+     return (votingpower_in(signers(h2.Commit),h1.Header.NextV) >
+                 2/3 * vp_all)
+       // signing validators are more than two thirds in h1.
+ } + + return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > + max(1/3,trustlevel) * vp_all) + // get validators in h1 that signed h2 + // sum of voting powers in h1 of + // validators that signed h2 + // is more than a third in h1 + } ``` -The light client is initialised with the trusted validator set, for example based on the known validator set hash, -validator set sequence number and the validator set init time. -The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid, -and 2) update the validator set (when the given header is valid and it is more recent than the seen headers). + *Remark*: Basic header verification must be done for *h2*. Similar checks are done in: + https://github.com/tendermint/tendermint/blob/master/types/validator_set.go#L591-L633 + + *Remark*: There are some sanity checks which are not in the code: + *h2.Header.height > h1.Header.height* and *h2.Header.bfttime > h1.Header.bfttime* and *h2.Header.bfttime < now*. + + *Remark*: ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > max(1/3,trustlevel) * vp_all)``` may return false even if *h2* was properly generated by Tendermint consensus in the case of big changes in the validator sets. However, the check ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > + 2/3 * vp_all)``` must return true if *h1* and *h2* were generated by Tendermint consensus. + +*Remark*: The 1/3 check differs from a previously proposed method that was based on intersecting validator sets and checking that the new validator set contains "enough" correct validators. We found that the old check is not suited for realistic changes in the validator sets. The new method is not only based on cardinalities, but also exploits that we can trust what is signed by a correct validator (i.e., signed by more than 1/3 of the voting power). -```golang -VerifyAndUpdate(signedHeader SignedHeader): - assertThat signedHeader.valSetNumber >= valSetNumber - if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then - setValidatorSet(signedHeader) +*Correctness arguments* + +Towards Lite Client Accuracy: +- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because *CheckSupport* returns true. +- h1 is trusted and sufficiently new +- by Tendermint Fault Model, less than 1/3 of voting power held by faulty validators => at least one correct validator *v* has signed *h2*. +- as *v* is correct up to now, it followed the Tendermint consensus protocol at least up to signing *h2* => *h2* was correctly generated, we arrive at the required contradiction. + + +Towards Lite Client Completeness: +- The check is successful if sufficiently many validators of *h1* are still validators in *h2* and signed *h2*. +- If *h2.Header.height = h1.Header.height + 1*, and both headers were generated correctly, the test passes + +*Verification Condition:* We may need a Tendermint invariant stating that if *h2.Header.height = h1.Header.height + 1* then *signers(h2.Commit) \subseteq h1.Header.NextV*. + +*Remark*: The variable *trustlevel* can be used if the user believes that relying on one correct validator is not sufficient. However, in case of (frequent) changes in the validator set, the higher the *trustlevel* is chosen, the more unlikely it becomes that CheckSupport returns true for non-adjacent headers. 
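+
+The auxiliary functions ```votingpower_in```, ```signers```, and ```totalVotingPower``` used by CheckSupport are not spelled out in this specification. The following is a minimal sketch under simplified, illustrative types; names keep the underscore of the text above, and the real Tendermint commit and validator structures carry more fields (signatures, block IDs, etc.):
+
+```go
+// Validator is a simplified validator-set entry.
+type Validator struct {
+    Address     string
+    VotingPower int64
+}
+
+// Commit lists, in simplified form, the addresses of the validators
+// whose precommits are included in the commit.
+type Commit struct {
+    Signers []string
+}
+
+// signers returns the set of validator addresses that signed the commit.
+func signers(c Commit) map[string]bool {
+    s := make(map[string]bool, len(c.Signers))
+    for _, addr := range c.Signers {
+        s[addr] = true
+    }
+    return s
+}
+
+// votingpower_in computes the voting power that the validators in V1
+// (given by address) have according to the voting powers recorded in V2.
+func votingpower_in(V1 map[string]bool, V2 []Validator) int64 {
+    var power int64
+    for _, v := range V2 {
+        if V1[v.Address] {
+            power += v.VotingPower
+        }
+    }
+    return power
+}
+
+// totalVotingPower returns the total voting power in V, i.e., votingpower_in(V,V).
+func totalVotingPower(V []Validator) int64 {
+    var power int64
+    for _, v := range V {
+        power += v.VotingPower
+    }
+    return power
+}
+```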
+
+**Bisection.** The following function uses CheckSupport in a recursion to find intermediate headers that allow the lite client to establish a sequence of trust.
+
+
+
+
+```go
+func Bisection(h1,h2,trustlevel) bool {
+  if CheckSupport(h1,h2,trustlevel) {
    return true
-  else
-    updateValidatorSet(signedHeader.ValSetNumber)
-    return VerifyAndUpdate(signedHeader)
-
-isValid(signedHeader SignedHeader):
-  valSetOfTheHeader = Validators(signedHeader.Header.Height)
-  assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash
-  assertThat signedHeader is passing basic validation
-  if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
-  else
+  }
+  if h2.Header.height == h1.Header.height + 1 {
+    // we have adjacent headers that are not matching (failed
+    // the CheckSupport)
+    // we could submit evidence here
    return false
+  }
+  pivot := (h1.Header.height + h2.Header.height) / 2
+  hp := Commit(pivot)
+  // ask a full node for header of height pivot
+  Store(hp)
+  // store header hp locally
+  if Bisection(h1,hp,trustlevel) {
+    // only check right branch if hp is trusted
+    // (otherwise a lot of unnecessary computation may be done)
+    return Bisection(hp,h2,trustlevel)
+  } else {
+    return false
+  }
+}
+```
-setValidatorSet(signedHeader SignedHeader):
-  nextValSet = Validators(signedHeader.Header.Height)
-  assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
-  valSet = nextValSet.Validators
-  valSetHash = signedHeader.Header.ValidatorsHash
-  valSetNumber = signedHeader.ValSetNumber
-  valSetTime = nextValSet.ValSetTime
-
-votingPower(commit Commit):
-  votingPower = 0
-  for each precommit in commit.Precommits do:
-    if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then
-      votingPower += valSet[precommit.ValidatorAddress].VotingPower
-  return votingPower
-
-votingPower(validatorSet []Validator):
-  for each validator in validatorSet do:
-    votingPower += validator.VotingPower
-  return votingPower
-
-updateValidatorSet(valSetNumberOfTheHeader):
-  while valSetNumber != valSetNumberOfTheHeader do
-    signedHeader = LastHeader(valSetNumber)
-    if isValid(signedHeader) then
-      setValidatorSet(signedHeader)
-    else return error
-  return
-```
-Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
-i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older
-headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent
-checks fail, a light client need to implement some recovery strategy, for example connecting to other FullNode.
+
+
+*Correctness arguments (sketch)*
+
+Lite Client Accuracy:
+- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because Bisection returns true.
+- Bisection returns true only if all calls to CheckSupport in the recursion return true.
+- Thus, we have a sequence of headers that all satisfied the CheckSupport check.
+- again, we arrive at a contradiction
+
+Lite Client Completeness:
+
+This is only ensured if upon *Commit(pivot)* the lite client is always provided with a correctly generated header.
+
+*Stalling*
+
+With Bisection, a faulty full node could stall a lite client by creating a long sequence of headers that are queried one-by-one by the lite client and look OK, before the lite client eventually detects a problem. There are several ways to address this:
+* Each call to ```Commit``` could be issued to a different full node
+* Instead of querying header by header, the lite client tells a full node which header it trusts, and the height of the header it needs. The full node responds with the header along with a proof consisting of intermediate headers that the lite client can use to verify. Roughly, Bisection would then be executed at the full node.
+* We may set a timeout on how long bisection may take.
+
+
+### The case *h2.Header.height < h1.Header.height*
+
+In the use case where someone tells the lite client that application data that is relevant for it can be read in the block of height *k* and the lite client trusts a more recent header, we can use the hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step.
+
+*Remark.* For the case where the lite client trusts two headers *i* and *j* with *i < k < j*, we should discuss/experiment whether the forward or the backward method is more effective.
+
+```go
+func Backwards(h1,h2) bool {
+  assert (h2.Header.height < h1.Header.height)
+  old := h1
+  for i := h1.Header.height - 1; i > h2.Header.height; i-- {
+    new := Commit(i)
+    Store(new)
+    if (hash(new) != old.Header.hash) {
+      return false
+    }
+    old := new
+  }
+  return (hash(h2) == old.Header.hash)
+ }
+```
diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md
index 47366a549..fd2e7bc4d 100644
--- a/docs/spec/p2p/connection.md
+++ b/docs/spec/p2p/connection.md
@@ -61,7 +61,7 @@ func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
 
 `Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
 for the channel with the given id byte `chID`. The message `msg` is serialized
-using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
+using the `tendermint/go-amino` submodule's `WriteBinary()` reflection routine.
 
 `TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
 with the given id byte chID if the queue is not full; otherwise it returns false immediately.
diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md
index 117fc5f2f..9c583ac0f 100644
--- a/docs/spec/reactors/mempool/messages.md
+++ b/docs/spec/reactors/mempool/messages.md
@@ -13,13 +13,13 @@ type TxMessage struct {
 }
 ```
 
-TxMessage is go-wire encoded and prepended with `0x1` as a
-"type byte". This is followed by a go-wire encoded byte-slice.
+TxMessage is go-amino encoded and prepended with `0x1` as a
+"type byte". This is followed by a go-amino encoded byte-slice.
 
 Prefix of 40=0x28 byte tx is: `0x010128...` followed by the actual 40-byte tx.
 Prefix of 350=0x015e byte tx is: `0x0102015e...` followed by the actual 350 byte tx.
 
-(Please see the [go-wire repo](https://github.com/tendermint/go-wire#an-interface-example) for more information)
+(Please see the [go-amino repo](https://github.com/tendermint/go-amino#an-interface-example) for more information)
 
 ## RPC Messages
diff --git a/docs/spec/rpc/index.html b/docs/spec/rpc/index.html
new file mode 100644
index 000000000..d6b0fc5a9
--- /dev/null
+++ b/docs/spec/rpc/index.html
@@ -0,0 +1,25 @@
+
+
+
+
+
+    Tendermint RPC
+
+
+
+
+
+
+ + + diff --git a/docs/spec/rpc/swagger.yaml b/docs/spec/rpc/swagger.yaml new file mode 100644 index 000000000..ef16bc72b --- /dev/null +++ b/docs/spec/rpc/swagger.yaml @@ -0,0 +1,2677 @@ +swagger: "2.0" +info: + version: "Master" + title: RPC client for Tendermint + description: A REST interface for state queries, transaction generation and broadcasting. + license: + name: Apache 2.0 + url: https://github.com/tendermint/tendermint/blob/master/LICENSE +tags: + - name: Websocket + description: Subscribe/unsubscribe are reserved for websocket events. + - name: Info + description: Informations about the node APIs + - name: Tx + description: Transactions broadcast APIs + - name: ABCI + description: ABCI APIs + - name: Evidence + description: Evidence APIs +schemes: + - https +host: stargate.cosmos.network:26657 +securityDefinitions: + kms: + type: basic +paths: + /broadcast_tx_sync: + get: + summary: Returns with the response from CheckTx. Does not wait for DeliverTx result. + tags: + - Tx + operationId: broadcast_tx_sync + description: | + If you want to be sure that the transaction is included in a block, you can + subscribe for the result using JSONRPC via a websocket. See + https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html + If you haven't received anything after a couple of blocks, resend it. If the + same happens again, send it to some other node. A few reasons why it could + happen: + + 1. malicious node can drop or pretend it had committed your tx + 2. malicious proposer (not necessary the one you're communicating with) can + drop transactions, which might become valid in the future + (https://github.com/tendermint/tendermint/issues/3322) + + + Please refer to + https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. + parameters: + - in: query + name: tx + type: string + required: true + description: The transaction + x-example: "456" + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/BroadcastTxResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /broadcast_tx_async: + get: + summary: Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results. + tags: + - Tx + operationId: broadcast_tx_async + description: | + If you want to be sure that the transaction is included in a block, you can + subscribe for the result using JSONRPC via a websocket. See + https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html + If you haven't received anything after a couple of blocks, resend it. If the + same happens again, send it to some other node. A few reasons why it could + happen: + + 1. malicious node can drop or pretend it had committed your tx + 2. malicious proposer (not necessary the one you're communicating with) can + drop transactions, which might become valid in the future + (https://github.com/tendermint/tendermint/issues/3322) + 3. node can be offline + + Please refer to + https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. 
+ parameters: + - in: query + name: tx + type: string + required: true + description: The transaction + x-example: "123" + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/BroadcastTxResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /broadcast_tx_commit: + get: + summary: Returns with the responses from CheckTx and DeliverTx. + tags: + - Tx + operationId: broadcast_tx_commit + description: | + IMPORTANT: use only for testing and development. In production, use + BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction + result using JSONRPC via a websocket. See + https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html + + CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout + waiting for tx to commit. + + If CheckTx or DeliverTx fail, no error will be returned, but the returned result + will contain a non-OK ABCI code. + + Please refer to + https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. + parameters: + - in: query + name: tx + type: string + required: true + description: The transaction + x-example: "785" + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/BroadcastTxCommitResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /subscribe: + get: + summary: Subscribe for events via WebSocket. + tags: + - Websocket + operationId: subscribe + description: | + To tell which events you want, you need to provide a query. query is a + string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a + string (escaped with single quotes), number, date or time. + + Examples: + tm.event = 'NewBlock' # new blocks + tm.event = 'CompleteProposal' # node got a complete proposal + tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction + tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block + tx.height = 5 # all txs of the fifth block + + Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height. + Note for transactions, you can define additional keys by providing events with + DeliverTx response. 
+ + import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/pubsub/query" + ) + + abci.ResponseDeliverTx{ + Events: []abci.Event{ + { + Type: "rewards.withdraw", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("AddrA")}, + cmn.KVPair{Key: []byte("source"), Value: []byte("SrcX")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "rewards.withdraw", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("AddrB")}, + cmn.KVPair{Key: []byte("source"), Value: []byte("SrcY")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "transfer", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("sender"), Value: []byte("AddrC")}, + cmn.KVPair{Key: []byte("recipient"), Value: []byte("AddrD")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + }, + }, + }, + } + + All events are indexed by a composite key of the form {eventType}.{evenAttrKey}. + In the above examples, the following keys would be indexed: + - rewards.withdraw.address + - rewards.withdraw.source + - rewards.withdraw.amount + - rewards.withdraw.balance + - transfer.sender + - transfer.recipient + - transfer.amount + + Multiple event types with duplicate keys are allowed and are meant to + categorize unique and distinct events. In the above example, all events + indexed under the key `rewards.withdraw.address` will have the following + values stored and queryable: + + - AddrA + - AddrB + + To create a query for txs where address AddrA withdrew rewards: + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'") + + To create a query for txs where address AddrA withdrew rewards from source Y: + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'Y'") + + To create a query for txs where AddrA transferred funds: + query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'") + + The following queries would return no results: + query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'") + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'") + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'") + + See list of all possible events here + https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants + + For complete query syntax, check out + https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. + + ```go + import "github.com/tendermint/tendermint/types" + + client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + err := client.Start() + if err != nil { + handle error + } + defer client.Stop() + ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) + defer cancel() + query := "tm.event = 'Tx' AND tx.height = 3" + txs, err := client.Subscribe(ctx, "test-client", query) + if err != nil { + handle error + } + + go func() { + for e := range txs { + fmt.Println("got ", e.Data.(types.EventDataTx)) + } + }() + ``` + parameters: + - in: query + name: query + type: string + required: true + description: | + query is a string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". 
operand can be a + string (escaped with single quotes), number, date or time. + x-example: tm.event = 'Tx' AND tx.height = 5 + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /unsubscribe: + get: + summary: Unsubscribe from event on Websocket + tags: + - Websocket + operationId: unsubscribe + description: | + ```go + client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + err := client.Start() + if err != nil { + handle error + } + defer client.Stop() + query := "tm.event = 'Tx' AND tx.height = 3" + err = client.Unsubscribe(context.Background(), "test-client", query) + if err != nil { + handle error + } + ``` + parameters: + - in: query + name: query + type: string + required: true + description: | + query is a string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a + string (escaped with single quotes), number, date or time. + x-example: tm.event = 'Tx' AND tx.height = 5 + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /unsubscribe_all: + get: + summary: Unsubscribe from all events via WebSocket + tags: + - Websocket + operationId: unsubscribe_all + description: | + Unsubscribe from all events via WebSocket + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /health: + get: + summary: Node heartbeat + tags: + - Info + operationId: health + description: | + Get node health. Returns empty result (200 OK) on success, no response - in case of an error. + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /status: + get: + summary: Node Status + operationId: status + tags: + - Info + description: | + Get Tendermint status including node info, pubkey, latest block hash, app hash, block height and time. + produces: + - application/json + responses: + 200: + description: Status of the node + schema: + $ref: "#/definitions/StatusResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /net_info: + get: + summary: Network informations + operationId: net_info + tags: + - Info + description: | + Get network info. + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/NetInfoResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /blockchain: + get: + summary: Get block headers for minHeight <= height <= maxHeight. + operationId: blockchain + parameters: + - in: query + name: minHeight + type: number + description: Minimum block height to return + x-example: 1 + - in: query + name: maxHeight + type: number + description: Maximum block height to return + x-example: 2 + tags: + - Info + description: | + Get Blockchain info. 
+ produces: + - application/json + responses: + 200: + description: Block headers, returned in descending order (highest first). + schema: + $ref: "#/definitions/BlockchainResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /block: + get: + summary: Get block at a specified height + operationId: block + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Block. + produces: + - application/json + responses: + 200: + description: Block informations. + schema: + $ref: "#/definitions/BlockResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /block_results: + get: + summary: Get block results at a specified height + operationId: block_results + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get block_results. + produces: + - application/json + responses: + 200: + description: Block results. + schema: + $ref: "#/definitions/BlockResultsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /commit: + get: + summary: Get commit results at a specified height + operationId: commit + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Commit. + produces: + - application/json + responses: + 200: + description: Commit results. + schema: + $ref: "#/definitions/CommitResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /validators: + get: + summary: Get validator set at a specified height + operationId: validators + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch validato set at the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Validators. + produces: + - application/json + responses: + 200: + description: Commit results. + schema: + $ref: "#/definitions/ValidatorsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /genesis: + get: + summary: Get Genesis + operationId: genesis + tags: + - Info + description: | + Get genesis. + produces: + - application/json + responses: + 200: + description: Genesis results. + schema: + $ref: "#/definitions/GenesisResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /dump_consensus_state: + get: + summary: Get consensus state + operationId: dump_consensus_state + tags: + - Info + description: | + Get consensus state. + produces: + - application/json + responses: + 200: + description: consensus state results. + schema: + $ref: "#/definitions/DumpConsensusResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /consensus_state: + get: + summary: Get consensus state + operationId: consensus_state + tags: + - Info + description: | + Get consensus state. + produces: + - application/json + responses: + 200: + description: consensus state results. 
+ schema: + $ref: "#/definitions/ConsensusStateResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /consensus_params: + get: + summary: Get consensus parameters + operationId: consensus_params + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get consensus parameters. + produces: + - application/json + responses: + 200: + description: consensus parameters results. + schema: + $ref: "#/definitions/ConsensusParamsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /unconfirmed_txs: + get: + summary: Get the list of unconfirmed transactions + operationId: unconfirmed_txs + parameters: + - in: query + name: limit + type: number + description: Maximum number of unconfirmed transactions to return + x-example: 1 + tags: + - Info + description: | + Get list of unconfirmed transactions + produces: + - application/json + responses: + 200: + description: List of unconfirmed transactions + schema: + $ref: "#/definitions/UnconfirmedTransactionsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /num_unconfirmed_txs: + get: + summary: Get data about unconfirmed transactions + operationId: num_unconfirmed_txs + tags: + - Info + description: | + Get data about unconfirmed transactions + produces: + - application/json + responses: + 200: + description: status about unconfirmed transactions + schema: + $ref: "#/definitions/NumUnconfirmedTransactionsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /tx_search: + get: + summary: Search for transactions + operationId: tx_search + parameters: + - in: query + name: query + type: string + description: Query + required: true + x-example: "tx.height=1000" + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + - in: query + name: page + type: number + description: "Page number (1-based)" + required: false + x-example: 1 + default: 1 + - in: query + name: per_page + type: number + description: "Number of entries per page (max: 100)" + required: false + x-example: 30 + default: 30 + tags: + - Info + description: | + Get list of unconfirmed transactions + produces: + - application/json + responses: + 200: + description: List of unconfirmed transactions + schema: + $ref: "#/definitions/TxSearchResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /tx: + get: + summary: Get transactions by hash + operationId: tx + parameters: + - in: query + name: hash + type: string + description: transaction Hash to retrive + required: true + x-example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + tags: + - Info + description: | + Get a trasasction + produces: + - application/json + responses: + 200: + description: Get a transaction + schema: + $ref: "#/definitions/TxResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /abci_info: + get: + summary: Get some info about the application. 
+ operationId: abci_info + tags: + - ABCI + description: | + Get some info about the application. + produces: + - application/json + responses: + 200: + description: Get some info about the application. + schema: + $ref: "#/definitions/ABCIInfoResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /abci_query: + get: + summary: Query the application for some information. + operationId: abci_query + parameters: + - in: query + name: path + type: string + description: Path to the data ("/a/b/c") + required: true + x-example: "/a/b/c" + - in: query + name: data + type: string + description: Data + required: true + x-example: "IHAVENOIDEA" + - in: query + name: height + type: number + description: Height (0 means latest) + required: false + x-example: 1 + default: 0 + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + tags: + - ABCI + description: | + Query the application for some information. + produces: + - application/json + responses: + 200: + description: Response of the submitted query + schema: + $ref: "#/definitions/ABCIQueryResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + + /broadcast_evidence: + get: + summary: Broadcast evidence of the misbehavior. + operationId: broadcast_evidence + parameters: + - in: query + name: evidence + type: string + description: Amino-encoded JSON evidence + required: true + x-example: "JSON_EVIDENCE_Amino_encoded" + tags: + - Info + description: | + Broadcast evidence of the misbehavior. + produces: + - application/json + responses: + 200: + description: Broadcast evidence of the misbehavior. + schema: + $ref: "#/definitions/BroadcastEvidenceResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + +definitions: + JSONRPC: + type: object + properties: + id: + type: string + x-example: "" + jsonrpc: + type: string + x-example: "2.0" + EmptyResponse: + description: Empty Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + type: object + additionalProperties: {} + ErrorResponse: + description: Error Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + error: + type: string + x-example: "Description of failure" + ProtocolVersion: + type: object + properties: + p2p: + type: string + x-example: "7" + block: + type: string + x-example: "10" + app: + type: string + x-example: "0" + PubKey: + type: object + properties: + type: + type: string + x-example: "tendermint/PubKeyEd25519" + value: + type: string + x-example: "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + NodeInfo: + type: object + properties: + protocol_version: + $ref: "#/definitions/ProtocolVersion" + id: + type: string + x-example: "5576458aef205977e18fd50b274e9b5d9014525a" + listen_addr: + type: string + x-example: "tcp:0.0.0.0:26656" + network: + type: string + x-example: "cosmoshub-2" + version: + type: string + x-example: "0.32.1" + channels: + type: string + x-example: "4020212223303800" + moniker: + type: string + x-example: "moniker-node" + other: + type: object + properties: + tx_index: + type: string + x-example: "on" + rpc_address: + type: string + x-example: "tcp:0.0.0.0:26657" + x-example: "moniker-node" + SyncInfo: + type: object + properties: + latest_block_hash: + type: string + x-example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" + latest_app_hash: + type: string + 
x-example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" + latest_block_height: + type: string + x-example: "1262196" + latest_block_time: + type: string + x-example: "2019-08-01T11:52:22.818762194Z" + catching_up: + type: boolean + x-example: false + ValidatorInfo: + type: object + properties: + address: + type: string + x-example: "5D6A51A8E9899C44079C6AF90618BA0369070E6E" + pub_key: + $ref: "#/definitions/PubKey" + voting_power: + type: string + x-example: "0" + Status: + description: Status Response + type: object + properties: + node_info: + $ref: "#/definitions/NodeInfo" + sync_info: + $ref: "#/definitions/SyncInfo" + validator_info: + $ref: "#/definitions/ValidatorInfo" + StatusResponse: + description: Status Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/Status" + Monitor: + type: object + properties: + Active: + type: boolean + x-example: true + Start: + type: string + x-example: "2019-07-31T14:31:28.66Z" + Duration: + type: string + x-example: "168901060000000" + Idle: + type: string + x-example: "168901040000000" + Bytes: + type: string + x-example: "5" + Samples: + type: string + x-example: "1" + InstRate: + type: string + x-example: "0" + CurRate: + type: string + x-example: "0" + AvgRate: + type: string + x-example: "0" + PeakRate: + type: string + x-example: "0" + BytesRem: + type: string + x-example: "0" + TimeRem: + type: string + x-example: "0" + Progress: + type: number + x-example: 0 + Channel: + type: object + properties: + ID: + type: number + x-example: 48 + SendQueueCapacity: + type: string + x-example: "1" + SendQueueSize: + type: string + x-example: "0" + Priority: + type: string + x-example: "5" + RecentlySent: + type: string + x-example: "0" + ConnectionStatus: + type: object + properties: + Duration: + type: string + x-example: "168901057956119" + SendMonitor: + $ref: "#/definitions/Monitor" + RecvMonitor: + $ref: "#/definitions/Monitor" + Channels: + type: array + items: + $ref: "#/definitions/Channel" + Peer: + type: object + properties: + node_info: + $ref: "#/definitions/NodeInfo" + is_outbound: + type: boolean + x-example: true + connection_status: + $ref: "#/definitions/ConnectionStatus" + remote_ip: + type: string + x-example: "95.179.155.35" + NetInfo: + type: object + properties: + listening: + type: boolean + x-example: true + listeners: + type: array + items: + type: string + x-example: "Listener(@)" + n_peers: + type: number + x-example: "1" + peers: + type: array + items: + $ref: "#/definitions/Peer" + NetInfoResponse: + description: NetInfo Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/NetInfo" + BlockID: + required: + - "hash" + - "parts" + properties: + hash: + type: string + x-example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" + parts: + required: + - "total" + - "hash" + properties: + total: + type: string + x-example: "1" + hash: + type: string + x-example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" + type: object + type: object + BlockMetaHeader: + required: + - "version" + - "chain_id" + - "height" + - "time" + - "num_txs" + - "total_txs" + - "last_block_id" + - "last_commit_hash" + - "data_hash" + - "validators_hash" + - "next_validators_hash" + - "consensus_hash" + - "app_hash" + - "last_results_hash" + - "evidence_hash" + - "proposer_address" + properties: + version: + required: + - "block" + - "app" + properties: + block: + type: 
string + x-example: "10" + app: + type: string + x-example: "0" + type: object + chain_id: + type: string + x-example: "cosmoshub-2" + height: + type: string + x-example: "12" + time: + type: string + x-example: "2019-04-22T17:01:51.701356223Z" + num_txs: + type: string + x-example: "2" + total_txs: + type: string + x-example: "3" + last_block_id: + $ref: "#/definitions/BlockID" + last_commit_hash: + type: string + x-example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" + data_hash: + type: string + x-example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" + validators_hash: + type: string + x-example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + next_validators_hash: + type: string + x-example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + consensus_hash: + type: string + x-example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" + app_hash: + type: string + x-example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" + last_results_hash: + type: string + x-example: "" + evidence_hash: + type: string + x-example: "" + proposer_address: + type: string + x-example: "D540AB022088612AC74B287D076DBFBC4A377A2E" + type: object + BlockMetaId: + required: + - "hash" + - "parts" + properties: + hash: + type: string + x-example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: string + x-example: "1" + hash: + type: string + x-example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: object + type: object + BlockMeta: + type: object + properties: + block_id: + $ref: "#/definitions/BlockMetaId" + header: + $ref: "#/definitions/BlockMetaHeader" + Blockchain: + type: object + required: + - "last_height" + - "block_metas" + properties: + last_height: + type: string + x-example: "1276718" + block_metas: + type: "array" + items: + $ref: "#/definitions/BlockMeta" + BlockchainResponse: + description: Blockchain info + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/Blockchain" + Commit: + required: + - "type" + - "height" + - "round" + - "block_id" + - "timestamp" + - "validator_address" + - "validator_index" + - "signature" + properties: + type: + type: number + x-example: 2 + height: + type: string + x-example: "1262085" + round: + type: string + x-example: "0" + block_id: + $ref: "#/definitions/BlockID" + timestamp: + type: string + x-example: "2019-08-01T11:39:38.867269833Z" + validator_address: + type: string + x-example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + validator_index: + type: string + x-example: "0" + signature: + type: string + x-example: "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + Block: + type: object + properties: + header: + $ref: "#/definitions/BlockMetaHeader" + data: + type: array + items: + type: string + x-example: "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + evidence: + type: array + items: + $ref: "#/definitions/Evidence" + last_commit: + type: object + properties: + block_id: + $ref: "#/definitions/BlockID" + precommits: + type: array + items: + $ref: "#/definitions/Commit" + Validator: + type: object + 
properties: + pub_key: + $ref: "#/definitions/PubKey" + voting_power: + type: number + address: + type: string + Evidence: + type: object + properties: + type: + type: string + height: + type: number + time: + type: number + total_voting_power: + type: number + validator: + $ref: "#/definitions/Validator" + BlockComplete: + type: object + properties: + block_meta: + $ref: "#/definitions/BlockMeta" + block: + $ref: "#/definitions/Block" + BlockResponse: + description: Blockc info + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/BlockComplete" + Tag: + type: object + properties: + key: + type: string + example: "YWN0aW9u" + value: + type: string + example: "c2VuZA==" + ################## FROM NOW ON NEEDS REFACTOR ################## + BlockResultsResponse: + type: "object" + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "height" + - "results" + properties: + height: + type: "string" + example: "12" + results: + required: + - "deliver_tx" + - "end_block" + - "begin_block" + properties: + deliver_tx: + type: "array" + x-nullable: true + items: + type: "object" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "25629" + gasUsed: + type: "string" + example: "25629" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + end_block: + required: + - "validator_updates" + properties: {} + type: "object" + begin_block: + properties: {} + type: "object" + type: "object" + type: "object" + CommitResponse: + type: "object" + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "signed_header" + - "canonical" + properties: + signed_header: + required: + - "header" + - "commit" + properties: + header: + required: + - "version" + - "chain_id" + - "height" + - "time" + - "num_txs" + - "total_txs" + - "last_block_id" + - "last_commit_hash" + - "data_hash" + - "validators_hash" + - "next_validators_hash" + - "consensus_hash" + - "app_hash" + - "last_results_hash" + - "evidence_hash" + - "proposer_address" + properties: + version: + required: + - "block" + - "app" + properties: + block: + type: "string" + example: "10" + app: + type: "string" + example: "0" + type: "object" + chain_id: + type: "string" + example: "cosmoshub-2" + height: + type: "string" + example: "12" + time: + type: "string" + example: "2019-04-22T17:01:51.701356223Z" + num_txs: + type: "string" + example: "2" + total_txs: + type: "string" + example: "3" + last_block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" + type: "object" + type: "object" + last_commit_hash: + type: "string" + example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" + data_hash: + type: "string" + example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" + validators_hash: + type: "string" + example: 
"D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + next_validators_hash: + type: "string" + example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + consensus_hash: + type: "string" + example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" + app_hash: + type: "string" + example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" + last_results_hash: + type: "string" + example: "" + evidence_hash: + type: "string" + example: "" + proposer_address: + type: "string" + example: "D540AB022088612AC74B287D076DBFBC4A377A2E" + type: "object" + commit: + required: + - "block_id" + - "precommits" + properties: + block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: "object" + type: "object" + precommits: + type: "array" + items: + type: "object" + properties: + type: + type: "number" + example: 2 + height: + type: "string" + example: "12" + round: + type: "string" + example: "0" + block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: "object" + type: "object" + timestamp: + type: "string" + example: "2019-04-22T17:01:58.376629719Z" + validator_address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + validator_index: + type: "string" + example: "0" + signature: + type: "string" + example: "14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw==" + type: "object" + type: "object" + canonical: + type: "boolean" + example: true + type: "object" + ValidatorsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "block_height" + - "validators" + properties: + block_height: + type: "string" + example: "55" + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "250353" + proposer_priority: + type: "string" + example: "13769415" + type: "object" + GenesisResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "genesis" + properties: + genesis: + required: + - "genesis_time" + - "chain_id" + - "consensus_params" + - "validators" + - "app_hash" + properties: + genesis_time: + type: "string" + example: "2019-04-22T17:00:00Z" + chain_id: + type: "string" + example: "cosmoshub-2" + consensus_params: + required: + - "block" + - "evidence" + - "validator" + properties: + block: + required: + - "max_bytes" + - "max_gas" + - 
"time_iota_ms" + properties: + max_bytes: + type: "string" + example: "200000" + max_gas: + type: "string" + example: "2000000" + time_iota_ms: + type: "string" + example: "1000" + type: "object" + evidence: + required: + - "max_age" + properties: + max_age: + type: "string" + example: "1000000" + type: "object" + validator: + required: + - "pub_key_types" + properties: + pub_key_types: + type: "array" + items: + type: "string" + example: + - "ed25519" + type: "object" + type: "object" + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + type: "object" + power: + type: "string" + example: "9328525" + name: + type: "string" + example: "Certus One" + app_hash: + type: "string" + example: "" + app_state: + properties: {} + type: "object" + type: "object" + type: "object" + DumpConsensusResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "round_state" + - "peers" + properties: + round_state: + required: + - "height" + - "round" + - "step" + - "start_time" + - "commit_time" + - "validators" + - "proposal" + - "proposal_block" + - "proposal_block_parts" + - "locked_round" + - "locked_block" + - "locked_block_parts" + - "valid_round" + - "valid_block" + - "valid_block_parts" + - "votes" + - "commit_round" + - "last_commit" + - "last_validators" + - "triggered_timeout_precommit" + properties: + height: + type: "string" + example: "1311801" + round: + type: "string" + example: "0" + step: + type: "number" + example: 3 + start_time: + type: "string" + example: "2019-08-05T11:28:49.064658805Z" + commit_time: + type: "string" + example: "2019-08-05T11:28:44.064658805Z" + validators: + required: + - "validators" + - "proposer" + properties: + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "239727" + proposer_priority: + type: "string" + example: "-11896414" + proposer: + required: + - "address" + - "pub_key" + - "voting_power" + - "proposer_priority" + properties: + address: + type: "string" + example: "708FDDCE121CDADA502F2B0252FEF13FDAA31E50" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "VNMNfw7mrQBSpEvCtA9ykOe6BoR00RM9b/a9v3vXZhY=" + type: "object" + voting_power: + type: "string" + example: "295360" + proposer_priority: + type: "string" + example: "-88886833" + type: "object" + type: "object" + locked_round: + type: "string" + example: "-1" + valid_round: + type: "string" + example: "-1" + votes: + type: "array" + items: + type: "object" + properties: + round: + type: "string" + example: "0" + prevotes: + type: "array" + x-nullable: true + items: + type: "string" + example: + - "nil-Vote" + - "Vote{19:46A3F8B8393B 1311801/00/1(Prevote) 000000000000 64CE682305CB @ 
2019-08-05T11:28:47.374703444Z}" + prevotes_bit_array: + type: "string" + example: "BA{100:___________________x________________________________________________________________________________} 209706/170220253 = 0.00" + precommits: + type: "array" + x-nullable: true + items: + type: "string" + example: + - "nil-Vote" + precommits_bit_array: + type: "string" + example: "BA{100:____________________________________________________________________________________________________} 0/170220253 = 0.00" + commit_round: + type: "string" + example: "-1" + last_commit: + x-nullable: true + required: + - "votes" + - "votes_bit_array" + - "peer_maj_23s" + properties: + votes: + type: "array" + items: + type: "string" + example: + - "Vote{0:000001E443FD 1311800/00/2(Precommit) 3071ADB27D1A 77EE1B6B6847 @ 2019-08-05T11:28:43.810128139Z}" + votes_bit_array: + type: "string" + example: "BA{100:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 170220253/170220253 = 1.00" + peer_maj_23s: + properties: {} + type: "object" + type: "object" + last_validators: + required: + - "validators" + - "proposer" + properties: + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "239727" + proposer_priority: + type: "string" + example: "-12136141" + proposer: + required: + - "address" + - "pub_key" + - "voting_power" + - "proposer_priority" + properties: + address: + type: "string" + example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + type: "object" + voting_power: + type: "string" + example: "8590153" + proposer_priority: + type: "string" + example: "-79515145" + type: "object" + type: "object" + triggered_timeout_precommit: + type: "boolean" + example: false + type: "object" + peers: + type: "array" + items: + type: "object" + properties: + node_address: + type: "string" + example: "357f6a6c1d27414579a8185060aa8adf9815c43c@68.183.41.207:26656" + peer_state: + required: + - "round_state" + - "stats" + properties: + round_state: + required: + - "height" + - "round" + - "step" + - "start_time" + - "proposal" + - "proposal_block_parts_header" + - "proposal_block_parts" + - "proposal_pol_round" + - "proposal_pol" + - "prevotes" + - "precommits" + - "last_commit_round" + - "last_commit" + - "catchup_commit_round" + - "catchup_commit" + properties: + height: + type: "string" + example: "1311801" + round: + type: "string" + example: "0" + step: + type: "number" + example: 3 + start_time: + type: "string" + example: "2019-08-05T11:28:49.21730864Z" + proposal: + type: "boolean" + example: false + proposal_block_parts_header: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "0" + hash: + type: "string" + example: "" + type: "object" + proposal_pol_round: + x-nullable: true + type: "string" + example: "-1" + proposal_pol: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + 
prevotes: + x-nullable: true + type: "string" + example: "___________________x________________________________________________________________________________" + precommits: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + last_commit_round: + x-nullable: true + type: "string" + example: "0" + last_commit: + x-nullable: true + type: "string" + example: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + catchup_commit_round: + type: "string" + x-nullable: true + example: "-1" + catchup_commit: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + type: "object" + stats: + required: + - "votes" + - "block_parts" + properties: + votes: + type: "string" + example: "1159558" + block_parts: + type: "string" + example: "4786" + type: "object" + type: "object" + type: "object" + ConsensusStateResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "round_state" + properties: + round_state: + required: + - "height/round/step" + - "start_time" + - "proposal_block_hash" + - "locked_block_hash" + - "valid_block_hash" + - "height_vote_set" + properties: + height/round/step: + type: "string" + example: "1262197/0/8" + start_time: + type: "string" + example: "2019-08-01T11:52:38.962730289Z" + proposal_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + locked_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + valid_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + height_vote_set: + type: "array" + items: + type: "object" + properties: + round: + type: "string" + example: "0" + prevotes: + type: "array" + items: + type: "string" + example: + - "Vote{0:000001E443FD 1262197/00/1(Prevote) 634ADAF1F402 7BB974E1BA40 @ 2019-08-01T11:52:35.513572509Z}" + - "nil-Vote" + prevotes_bit_array: + type: "string" + example: "BA{100:xxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 169753436/170151262 = 1.00" + precommits: + type: "array" + items: + type: "string" + example: + - "Vote{5:18C78D135C9D 1262197/00/2(Precommit) 634ADAF1F402 8B5EFFFEABCD @ 2019-08-01T11:52:36.25600005Z}" + - "nil-Vote" + precommits_bit_array: + type: "string" + example: "BA{100:xxxxxx_xxxxx_xxxx_x_xxx_xx_xx_xx__x_x_x__xxxxxxxxxxxxxx_xxxx_xx_xxxxxx_xxxxxxxx_xxxx_xxx_x_xxxx__xxx} 118726247/170151262 = 0.70" + type: "object" + type: "object" + ConsensusParamsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "block_height" + - "consensus_params" + properties: + block_height: + type: "string" + example: "1313448" + consensus_params: + required: + - "block" + - "evidence" + - "validator" + properties: + block: + required: + - "max_bytes" + - "max_gas" + - "time_iota_ms" + properties: + max_bytes: + type: "string" + example: "200000" + max_gas: + type: "string" + example: "2000000" + time_iota_ms: + type: "string" + example: "1000" + type: "object" + evidence: + required: + - "max_age" + properties: 
+ max_age: + type: "string" + example: "1000000" + type: "object" + validator: + required: + - "pub_key_types" + properties: + pub_key_types: + type: "array" + items: + type: "string" + example: + - "ed25519" + type: "object" + type: "object" + type: "object" + NumUnconfirmedTransactionsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "n_txs" + - "total" + - "total_bytes" + properties: + n_txs: + type: "string" + example: "31" + total: + type: "string" + example: "82" + total_bytes: + type: "string" + example: "19974" + # txs: + # type: "array" + # x-nullable: true + # items: + # type: "string" + # x-nullable: true + # example: + # - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + type: "object" + UnconfirmedTransactionsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "n_txs" + - "total" + - "total_bytes" + - "txs" + properties: + n_txs: + type: "string" + example: "82" + total: + type: "string" + example: "82" + total_bytes: + type: "string" + example: "19974" + txs: + type: array + x-nullable: true + items: + type: string + x-nullable: true + example: + - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + type: "object" + TxSearchResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "txs" + - "total_count" + properties: + txs: + type: "array" + items: + type: "object" + properties: + hash: + type: "string" + example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + height: + type: "string" + example: "1000" + index: + type: "number" + example: 0 + tx_result: + required: + - "log" + - "gasWanted" + - "gasUsed" + - "tags" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "200000" + gasUsed: + type: "string" + example: "28596" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + type: "object" + tx: + type: "string" + example: 
"5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + proof: + required: + - "RootHash" + - "Data" + - "Proof" + properties: + RootHash: + type: "string" + example: "72FE6BF6D4109105357AECE0A82E99D0F6288854D16D8767C5E72C57F876A14D" + Data: + type: "string" + example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + Proof: + required: + - "total" + - "index" + - "leaf_hash" + - "aunts" + properties: + total: + type: "string" + example: "2" + index: + type: "string" + example: "0" + leaf_hash: + type: "string" + example: "eoJxKCzF3m72Xiwb/Q43vJ37/2Sx8sfNS9JKJohlsYI=" + aunts: + type: "array" + items: + type: "string" + example: + - "eWb+HG/eMmukrQj4vNGyFYb3nKQncAWacq4HF5eFzDY=" + type: "object" + type: "object" + total_count: + type: "string" + example: "2" + type: "object" + TxResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "hash" + - "height" + - "index" + - "tx_result" + - "tx" + properties: + hash: + type: "string" + example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + height: + type: "string" + example: "1000" + index: + type: "number" + example: 0 + tx_result: + required: + - "log" + - "gasWanted" + - "gasUsed" + - "tags" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "200000" + gasUsed: + type: "string" + example: "28596" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + type: "object" + tx: + type: "string" + example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + type: "object" + ABCIInfoResponse: + type: object + required: + - "jsonrpc" + - "id" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "response" + properties: + response: + required: + - "data" + - "app_version" + - "version" + properties: + data: + type: "string" + example: '{"size":0}' + version: + type: string + example: "0.16.1" + app_version: + type: "string" + example: "1314126" + type: "object" + type: "object" + ABCIQueryResponse: + type: object + required: + - "error" + - "result" + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + required: + - "response" + properties: + response: + required: + - "log" + - "height" + - "proof" + - "value" + - "key" + - "index" + - "code" + properties: + log: + type: "string" + example: "exists" + height: + type: "string" + example: "0" + proof: + type: "string" + example: 
"010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C" + value: + type: "string" + example: "61626364" + key: + type: "string" + example: "61626364" + index: + type: "string" + example: "-1" + code: + type: "string" + example: "0" + type: "object" + type: "object" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastEvidenceResponse: + type: object + required: + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + type: "string" + example: "" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastTxCommitResponse: + type: object + required: + - "error" + - "result" + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + required: + - "height" + - "hash" + - "deliver_tx" + - "check_tx" + properties: + height: + type: "string" + example: "26682" + hash: + type: "string" + example: "75CA0F856A4DA078FC4911580360E70CEFB2EBEE" + deliver_tx: + required: + - "log" + - "data" + - "code" + properties: + log: + type: "string" + example: "" + data: + type: "string" + example: "" + code: + type: "string" + example: "0" + type: "object" + check_tx: + required: + - "log" + - "data" + - "code" + properties: + log: + type: "string" + example: "" + data: + type: "string" + example: "" + code: + type: "string" + example: "0" + type: "object" + type: "object" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastTxResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + - "error" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "code" + - "data" + - "log" + - "hash" + properties: + code: + type: "string" + example: "0" + data: + type: "string" + example: "" + log: + type: "string" + example: "" + hash: + type: "string" + example: "0D33F2F03A5234F38706E43004489E061AC40A2E" + type: "object" + error: + type: "string" + example: "" diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index 8fd369d8f..59a259669 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -240,8 +240,9 @@ max_txs_bytes = 1073741824 # Size of the cache (used to filter transactions we saw earlier) in transactions cache_size = 10000 -# Limit the size of TxMessage -max_msg_bytes = 1048576 +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. +max_tx_bytes = 1048576 ##### fast sync configuration options ##### [fastsync] diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index 26b90ed70..0653a5639 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -87,6 +87,6 @@ websocket. 
## Development ``` -make get_tools +make tools make test ``` diff --git a/dredd.yml b/dredd.yml new file mode 100644 index 000000000..0db3d767d --- /dev/null +++ b/dredd.yml @@ -0,0 +1,33 @@ +color: true +dry-run: null +hookfiles: build/contract_tests +language: go +require: null +server: make localnet-start +server-wait: 30 +init: false +custom: {} +names: false +only: [] +reporter: [] +output: [] +header: [] +sorted: false +user: null +inline-errors: false +details: false +method: [GET] +loglevel: warning +path: [] +hooks-worker-timeout: 5000 +hooks-worker-connect-timeout: 1500 +hooks-worker-connect-retry: 500 +hooks-worker-after-connect-wait: 100 +hooks-worker-term-timeout: 5000 +hooks-worker-term-retry: 500 +hooks-worker-handler-host: 127.0.0.1 +hooks-worker-handler-port: 61321 +config: ./dredd.yml +# This path accepts no variables +blueprint: ./docs/spec/rpc/swagger.yaml +endpoint: 'http://127.0.0.1:26657/' diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 9603e6680..006978b3b 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -203,6 +203,7 @@ func TestEvidenceListMessageValidationBasic(t *testing.T) { }, true}, } for _, tc := range testCases { + tc := tc t.Run(tc.testName, func(t *testing.T) { evListMsg := &EvidenceListMessage{} n := 3 diff --git a/go.mod b/go.mod index 1bf4f0277..ddb067c29 100644 --- a/go.mod +++ b/go.mod @@ -8,42 +8,27 @@ require ( github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a - github.com/fortytw2/leaktest v1.2.0 - github.com/go-kit/kit v0.6.0 - github.com/go-logfmt/logfmt v0.3.0 - github.com/go-stack/stack v1.8.0 // indirect - github.com/gogo/protobuf v1.2.1 + github.com/fortytw2/leaktest v1.3.0 + github.com/go-kit/kit v0.9.0 + github.com/go-logfmt/logfmt v0.4.0 + github.com/gogo/protobuf v1.3.0 github.com/golang/protobuf v1.3.2 github.com/google/gofuzz v1.0.0 // indirect - github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect - github.com/gorilla/websocket v1.2.0 - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 // indirect + github.com/gorilla/websocket v1.4.1 github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect - github.com/libp2p/go-buffer-pool v0.0.1 - github.com/magiconair/properties v1.8.0 - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect - github.com/pelletier/go-toml v1.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.0.2 + github.com/magiconair/properties v1.8.1 github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v0.9.1 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect - github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect - github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect + github.com/prometheus/client_golang v0.9.3 github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 - github.com/rs/cors v1.6.0 - github.com/spf13/afero v1.1.2 // indirect - github.com/spf13/cast v1.3.0 // indirect + github.com/rs/cors v1.7.0 + github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v0.0.1 - github.com/spf13/jwalterweatherman v1.0.0 // indirect - github.com/spf13/pflag v1.0.3 // indirect - github.com/spf13/viper 
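The new dredd.yml replays the swagger paths against a localnet (`server: make localnet-start`) and hands each transaction to the hooks binary named in `hookfiles` (`build/contract_tests`). With the `snikch/goodman` dependency added to go.mod, such a hooks program can be a few lines of Go. The sketch below is illustrative only: the skip rule and printed message are assumptions, not something this diff prescribes.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/snikch/goodman/hooks"
	"github.com/snikch/goodman/transaction"
)

func main() {
	// Dredd talks to this hooks server on the hooks-worker-handler-port
	// configured in dredd.yml while it replays the swagger transactions.
	h := hooks.NewHooks()
	server := hooks.NewServer(hooks.NewHooksRunner(h))

	h.BeforeAll(func(t []*transaction.Transaction) {
		fmt.Printf("running %d contract-test transactions\n", len(t))
	})

	// Illustrative skip rule: leave out endpoints that would mutate state
	// on the localnet, e.g. the broadcast family.
	h.BeforeEach(func(t *transaction.Transaction) {
		if strings.HasPrefix(t.Name, "/broadcast_") {
			t.Skip = true
		}
	})

	server.Serve()
	defer server.Listener.Close()
}
```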
v1.0.0 - github.com/stretchr/testify v1.3.0 + github.com/spf13/viper v1.4.0 + github.com/stretchr/testify v1.4.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tm-db v0.1.1 - golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c // indirect golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 - google.golang.org/grpc v1.22.0 + google.golang.org/grpc v1.23.1 ) diff --git a/go.sum b/go.sum index a8a72a82d..79370588d 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,19 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo= github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -18,65 +24,97 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q= -github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-kit/kit v0.6.0 h1:wTifptAGIyIuir4bRyN4h7+kAa2a4eepLYVmRe5qqQ8= -github.com/go-kit/kit v0.6.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ= -github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg= -github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -84,22 +122,37 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 
h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= @@ -110,59 +163,95 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I= -github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 
h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d h1:yCHL2COLGLNfb4sA9AlzIHpapb8UATvAQyJulS6Eg6Q= -github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw= github.com/tendermint/tm-db v0.1.1 h1:G3Xezy3sOk9+ekhjZ/kjArYIs1SmwV+1OUgNkj7RgV0= github.com/tendermint/tm-db v0.1.1/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c h1:Rx/HTKi09myZ25t1SOlDHmHOy/mKxNAcu0hP1oPX9qM= -golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0 
h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index 09ec8af25..acf745cc8 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -232,6 +232,7 @@ func TestJSONMarshalUnmarshal(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.bA.String(), func(t *testing.T) { bz, err := json.Marshal(tc.bA) require.NoError(t, err) diff --git a/libs/common/bytes_test.go b/libs/common/bytes_test.go index 9e11988f2..49c8cacb9 100644 --- a/libs/common/bytes_test.go +++ b/libs/common/bytes_test.go @@ -40,6 +40,7 @@ func TestJSONMarshal(t *testing.T) { } for i, tc := range cases { + tc := tc t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { ts := TestStruct{B1: tc.input, B2: tc.input} diff --git a/libs/flowrate/io_test.go b/libs/flowrate/io_test.go index ab2c7121f..d482a7b72 100644 --- a/libs/flowrate/io_test.go +++ b/libs/flowrate/io_test.go @@ -89,6 +89,7 @@ func TestReader(t *testing.T) { {false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, } for i, s := range status { + s := s if !statusesAreEqual(&s, &want[i]) { t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) } @@ -143,6 +144,7 @@ func TestWriter(t *testing.T) { {true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, } for i, s := range status { + s := s if !statusesAreEqual(&s, &want[i]) { t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, 
want[i], s) } diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go index f9957f043..cf4dc7495 100644 --- a/libs/log/filter_test.go +++ b/libs/log/filter_test.go @@ -55,6 +55,7 @@ func TestVariousLevels(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { var buf bytes.Buffer logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go index 1f890cef1..e4fdbaa21 100644 --- a/libs/log/tm_logger_test.go +++ b/libs/log/tm_logger_test.go @@ -6,7 +6,6 @@ import ( "strings" "testing" - "github.com/go-logfmt/logfmt" "github.com/tendermint/tendermint/libs/log" ) @@ -16,7 +15,7 @@ func TestLoggerLogsItsErrors(t *testing.T) { logger := log.NewTMLogger(&buf) logger.Info("foo", "baz baz", "bar") msg := strings.TrimSpace(buf.String()) - if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { + if !strings.Contains(msg, "foo") { t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) } } diff --git a/lite/base_verifier.go b/lite/base_verifier.go index 9eb880bb2..9fe583b0f 100644 --- a/lite/base_verifier.go +++ b/lite/base_verifier.go @@ -3,6 +3,8 @@ package lite import ( "bytes" + "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" @@ -63,7 +65,7 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { // Do basic sanity checks. err := signedHeader.ValidateBasic(bv.chainID) if err != nil { - return cmn.ErrorWrap(err, "in verify") + return errors.Wrap(err, "in verify") } // Check commit signatures. @@ -71,7 +73,7 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { bv.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit) if err != nil { - return cmn.ErrorWrap(err, "in verify") + return errors.Wrap(err, "in verify") } return nil diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index c95fee9ec..9a5a62816 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -13,6 +13,8 @@ import ( dbm "github.com/tendermint/tm-db" ) +const testChainID = "inquiry-test" + func TestInquirerValidPath(t *testing.T) { assert, require := assert.New(t), require.New(t) trust := NewDBProvider("trust", dbm.NewMemDB()) @@ -24,7 +26,7 @@ func TestInquirerValidPath(t *testing.T) { nkeys := keys.Extend(1) // Construct a bunch of commits, each with one more height than the last. - chainID := "inquiry-test" + chainID := testChainID consHash := []byte("params") resHash := []byte("results") count := 50 @@ -146,7 +148,7 @@ func TestInquirerVerifyHistorical(t *testing.T) { nkeys := keys.Extend(1) // Construct a bunch of commits, each with one more height than the last. - chainID := "inquiry-test" + chainID := testChainID count := 10 consHash := []byte("special-params") fcz := make([]FullCommit, count) @@ -229,7 +231,7 @@ func TestConcurrencyInquirerVerify(t *testing.T) { nkeys := keys.Extend(1) // Construct a bunch of commits, each with one more height than the last. 
- chainID := "inquiry-test" + chainID := testChainID count := 10 consHash := []byte("special-params") fcz := make([]FullCommit, count) diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 75442c726..5bb829b0a 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -3,7 +3,7 @@ package errors import ( "fmt" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) //---------------------------------------- @@ -49,15 +49,12 @@ func (e errEmptyTree) Error() string { // ErrCommitNotFound indicates that a the requested commit was not found. func ErrCommitNotFound() error { - return cmn.ErrorWrap(errCommitNotFound{}, "") + return errors.Wrap(errCommitNotFound{}, "") } func IsErrCommitNotFound(err error) bool { - if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errCommitNotFound) - return ok - } - return false + _, ok := errors.Cause(err).(errCommitNotFound) + return ok } //----------------- @@ -65,18 +62,15 @@ func IsErrCommitNotFound(err error) bool { // ErrUnexpectedValidators indicates a validator set mismatch. func ErrUnexpectedValidators(got, want []byte) error { - return cmn.ErrorWrap(errUnexpectedValidators{ + return errors.Wrap(errUnexpectedValidators{ got: got, want: want, }, "") } func IsErrUnexpectedValidators(err error) bool { - if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errUnexpectedValidators) - return ok - } - return false + _, ok := errors.Cause(err).(errUnexpectedValidators) + return ok } //----------------- @@ -84,28 +78,22 @@ func IsErrUnexpectedValidators(err error) bool { // ErrUnknownValidators indicates that some validator set was missing or unknown. func ErrUnknownValidators(chainID string, height int64) error { - return cmn.ErrorWrap(errUnknownValidators{chainID, height}, "") + return errors.Wrap(errUnknownValidators{chainID, height}, "") } func IsErrUnknownValidators(err error) bool { - if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errUnknownValidators) - return ok - } - return false + _, ok := errors.Cause(err).(errUnknownValidators) + return ok } //----------------- // ErrEmptyTree func ErrEmptyTree() error { - return cmn.ErrorWrap(errEmptyTree{}, "") + return errors.Wrap(errEmptyTree{}, "") } func IsErrEmptyTree(err error) bool { - if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errEmptyTree) - return ok - } - return false + _, ok := errors.Cause(err).(errEmptyTree) + return ok } diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 6a7c2354c..41923659f 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,7 +1,7 @@ package proxy import ( - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) type errNoData struct{} @@ -12,13 +12,10 @@ func (e errNoData) Error() string { // IsErrNoData checks whether an error is due to a query returning empty data func IsErrNoData(err error) bool { - if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errNoData) - return ok - } - return false + _, ok := errors.Cause(err).(errNoData) + return ok } func ErrNoData() error { - return cmn.ErrorWrap(errNoData{}, "") + return errors.Wrap(errNoData{}, "") } diff --git a/lite/proxy/query.go b/lite/proxy/query.go index fd10e0bb6..518a6a235 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -4,9 +4,10 @@ import ( "fmt" "strings" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" "github.com/tendermint/tendermint/crypto/merkle" + cmn "github.com/tendermint/tendermint/libs/common" 
"github.com/tendermint/tendermint/lite" lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" @@ -28,7 +29,7 @@ func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rp } res, err := GetWithProofOptions(prt, "/key", key, - rpcclient.ABCIQueryOptions{Height: int64(reqHeight), Prove: true}, + rpcclient.ABCIQueryOptions{Height: reqHeight, Prove: true}, node, cert) if err != nil { return @@ -83,7 +84,7 @@ func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL) err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, kp.String(), resp.Value) if err != nil { - return nil, cmn.ErrorWrap(err, "Couldn't verify value proof") + return nil, errors.Wrap(err, "Couldn't verify value proof") } return &ctypes.ResultABCIQuery{Response: resp}, nil } else { @@ -92,7 +93,7 @@ func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts // XXX How do we encode the key into a string... err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key)) if err != nil { - return nil, cmn.ErrorWrap(err, "Couldn't verify absence proof") + return nil, errors.Wrap(err, "Couldn't verify absence proof") } return &ctypes.ResultABCIQuery{Response: resp}, nil } diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index 2119a7aee..3673b66cf 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -1,7 +1,8 @@ package proxy import ( - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/lite" lclient "github.com/tendermint/tendermint/lite/client" @@ -29,11 +30,11 @@ func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logge logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...") fc, err := source.LatestFullCommit(chainID, 1, 1) if err != nil { - return nil, cmn.ErrorWrap(err, "fetching source full commit @ height 1") + return nil, errors.Wrap(err, "fetching source full commit @ height 1") } err = trust.SaveFullCommit(fc) if err != nil { - return nil, cmn.ErrorWrap(err, "saving full commit to trusted") + return nil, errors.Wrap(err, "saving full commit to trusted") } } diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 2d333e9fb..82fee97cd 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -58,7 +58,7 @@ func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { if !prove || err != nil { return res, err } - h := int64(res.Height) + h := res.Height sh, err := GetCertifiedCommit(h, w.Client, w.cert) if err != nil { return res, err diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index fc4591d29..ee47e52d8 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -53,8 +53,8 @@ type CListMempool struct { // Atomic integers height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() txsBytes int64 // total size of mempool, in bytes + rechecking int32 // for re-checking filtered txs on Update() // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -232,8 +232,8 @@ func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), t // The size of the corresponding amino-encoded TxMessage // can't be larger than the maxMsgSize, otherwise we can't // relay it to peers. 
- if max := calcMaxTxSize(mem.config.MaxMsgBytes); txSize > max { - return ErrTxTooLarge{max, txSize} + if txSize > mem.config.MaxTxBytes { + return ErrTxTooLarge{mem.config.MaxTxBytes, txSize} } if mem.preCheck != nil { diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 220bb5cb3..e45228479 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -426,8 +426,8 @@ func TestMempoolMaxMsgSize(t *testing.T) { mempl, cleanup := newMempoolWithApp(cc) defer cleanup() - maxMsgSize := mempl.config.MaxMsgBytes - maxTxSize := calcMaxTxSize(mempl.config.MaxMsgBytes) + maxTxSize := mempl.config.MaxTxBytes + maxMsgSize := calcMaxMsgSize(maxTxSize) testCases := []struct { len int @@ -564,7 +564,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { for i := 0; i < N; i++ { peerID := mrand.Intn(maxPeers) txNum := mrand.Intn(nTxs) - tx := txs[int(txNum)] + tx := txs[txNum] // this will err with ErrTxInCache many times ... mempool.CheckTxWithInfo(tx, nil, TxInfo{SenderID: uint16(peerID)}) diff --git a/mempool/reactor.go b/mempool/reactor.go index 0ca273401..349ed7ea5 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -263,8 +263,9 @@ func RegisterMempoolMessages(cdc *amino.Codec) { } func (memR *Reactor) decodeMsg(bz []byte) (msg MempoolMessage, err error) { - if l := len(bz); l > memR.config.MaxMsgBytes { - return msg, ErrTxTooLarge{memR.config.MaxMsgBytes, l} + maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes) + if l := len(bz); l > maxMsgSize { + return msg, ErrTxTooLarge{maxMsgSize, l} } err = cdc.UnmarshalBinaryBare(bz, &msg) return @@ -282,8 +283,8 @@ func (m *TxMessage) String() string { return fmt.Sprintf("[TxMessage %v]", m.Tx) } -// calcMaxTxSize returns the max size of Tx +// calcMaxMsgSize returns the max size of TxMessage // account for amino overhead of TxMessage -func calcMaxTxSize(maxMsgSize int) int { - return maxMsgSize - aminoOverheadForTxMessage +func calcMaxMsgSize(maxTxSize int) int { + return maxTxSize + aminoOverheadForTxMessage } diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index c2d7c3a36..6ff02cb6f 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -30,7 +30,7 @@ go get $REPO cd $GOPATH/src/$REPO ## build -make get_tools +make tools make build # generate an ssh key diff --git a/node/node.go b/node/node.go index 18cb0ba3b..5c98ea5bf 100644 --- a/node/node.go +++ b/node/node.go @@ -23,7 +23,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" @@ -278,9 +278,7 @@ func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore, return nil } -func logNodeStartupInfo(state sm.State, privValidator types.PrivValidator, logger, - consensusLogger log.Logger) { - +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { // Log the version info. 
logger.Info("Version info", "software", version.TMCoreSemVer, @@ -296,7 +294,6 @@ func logNodeStartupInfo(state sm.State, privValidator types.PrivValidator, logge ) } - pubKey := privValidator.GetPubKey() addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { @@ -601,7 +598,13 @@ func NewNode(config *cfg.Config, } } - logNodeStartupInfo(state, privValidator, logger, consensusLogger) + pubKey := privValidator.GetPubKey() + if pubKey == nil { + // TODO: GetPubKey should return errors - https://github.com/tendermint/tendermint/issues/3602 + return nil, errors.New("could not retrieve public key from private validator") + } + + logNodeStartupInfo(state, pubKey, logger, consensusLogger) // Decide whether to fast-sync or not // We don't fast-sync when the only validator is us. @@ -1158,29 +1161,13 @@ func createAndStartPrivValidatorSocketClient( listenAddr string, logger log.Logger, ) (types.PrivValidator, error) { - var listener net.Listener - - protocol, address := cmn.ProtocolAndAddress(listenAddr) - ln, err := net.Listen(protocol, address) + pve, err := privval.NewSignerListener(listenAddr, logger) if err != nil { - return nil, err - } - switch protocol { - case "unix": - listener = privval.NewUnixListener(ln) - case "tcp": - // TODO: persist this key so external signer - // can actually authenticate us - listener = privval.NewTCPListener(ln, ed25519.GenPrivKey()) - default: - return nil, fmt.Errorf( - "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", - protocol, - ) + return nil, errors.Wrap(err, "failed to start private validator") } - pvsc := privval.NewSignerValidatorEndpoint(logger.With("module", "privval"), listener) - if err := pvsc.Start(); err != nil { + pvsc, err := privval.NewSignerClient(pve) + if err != nil { return nil, errors.Wrap(err, "failed to start private validator") } diff --git a/node/node_test.go b/node/node_test.go index f031c13a9..6cdaceffb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -136,25 +136,29 @@ func TestNodeSetPrivValTCP(t *testing.T) { config.BaseConfig.PrivValidatorListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) - pvsc := privval.NewSignerServiceEndpoint( + dialerEndpoint := privval.NewSignerDialerEndpoint( log.TestingLogger(), + dialer, + ) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) + + signerServer := privval.NewSignerServer( + dialerEndpoint, config.ChainID(), types.NewMockPV(), - dialer, ) - privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc) go func() { - err := pvsc.Start() + err := signerServer.Start() if err != nil { panic(err) } }() - defer pvsc.Stop() + defer signerServer.Stop() n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator()) + assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) } // address without a protocol must result in error @@ -178,13 +182,17 @@ func TestNodeSetPrivValIPC(t *testing.T) { config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile dialer := privval.DialUnixFn(tmpfile) - pvsc := privval.NewSignerServiceEndpoint( + dialerEndpoint := privval.NewSignerDialerEndpoint( log.TestingLogger(), + dialer, + ) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) + + pvsc := privval.NewSignerServer( + dialerEndpoint, config.ChainID(), types.NewMockPV(), - dialer, ) - 
privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc) go func() { err := pvsc.Start() @@ -194,8 +202,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator()) - + assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) } // testFreeAddr claims a free port so we don't block on listener being ready. diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index a206af542..231ec989f 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -2,7 +2,8 @@ package conn import ( "bufio" - "errors" + "runtime/debug" + "fmt" "io" "math" @@ -12,6 +13,8 @@ import ( "sync/atomic" "time" + "github.com/pkg/errors" + amino "github.com/tendermint/go-amino" cmn "github.com/tendermint/tendermint/libs/common" flow "github.com/tendermint/tendermint/libs/flowrate" @@ -313,8 +316,8 @@ func (c *MConnection) flush() { // Catch panics, usually caused by remote disconnects. func (c *MConnection) _recover() { if r := recover(); r != nil { - err := cmn.ErrorWrap(r, "recovered panic in MConnection") - c.stopForError(err) + c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) + c.stopForError(errors.Errorf("recovered from panic: %v", r)) } } @@ -800,7 +803,7 @@ func (ch *Channel) isSendPending() bool { // Not goroutine-safe func (ch *Channel) nextPacketMsg() PacketMsg { packet := PacketMsg{} - packet.ChannelID = byte(ch.desc.ID) + packet.ChannelID = ch.desc.ID maxSize := ch.maxPacketMsgPayloadSize packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 91e3e2099..03a31ec63 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -133,7 +133,7 @@ func TestMConnectionReceive(t *testing.T) { select { case receivedBytes := <-receivedCh: - assert.Equal(t, []byte(msg), receivedBytes) + assert.Equal(t, msg, receivedBytes) case err := <-errorsCh: t.Fatalf("Expected %s, got %+v", msg, err) case <-time.After(500 * time.Millisecond): diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index a4489f475..c8e450f5b 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -188,7 +188,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { return n, err } } - return + return n, err } // CONTRACT: data smaller than dataMaxSize is read atomically. 
@@ -234,7 +234,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { sc.recvBuffer = make([]byte, len(chunk)-n) copy(sc.recvBuffer, chunk[n:]) } - return + return n, err } // Implements net.Conn diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 9ab9695a3..0b7cc00c3 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -87,7 +87,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection require.Nil(tb, trs.FirstError()) require.True(tb, ok, "Unexpected task abortion") - return + return fooSecConn, barSecConn } func TestSecretConnectionHandshake(t *testing.T) { @@ -110,6 +110,7 @@ func TestShareLowOrderPubkey(t *testing.T) { // all blacklisted low order points: for _, remLowOrderPubKey := range blacklist { + remLowOrderPubKey := remLowOrderPubKey _, _ = cmn.Parallel( func(_ int) (val interface{}, err error, abort bool) { _, err = shareEphPubKey(fooConn, locEphPub) @@ -135,6 +136,7 @@ func TestShareLowOrderPubkey(t *testing.T) { func TestComputeDHFailsOnLowOrder(t *testing.T) { _, locPrivKey := genEphKeys() for _, remLowOrderPubKey := range blacklist { + remLowOrderPubKey := remLowOrderPubKey shared, err := computeDHSecret(&remLowOrderPubKey, locPrivKey) assert.Error(t, err) diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index e7d82cd77..a3dd40f30 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -67,6 +67,7 @@ func TestNewNetAddressString(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { addr, err := NewNetAddressString(tc.addr) if tc.correct { diff --git a/p2p/peer_test.go b/p2p/peer_test.go index bf61beb4f..37a3009c0 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -113,13 +114,13 @@ func testOutboundPeerConn( var pc peerConn conn, err := testDial(addr, config) if err != nil { - return pc, cmn.ErrorWrap(err, "Error creating peer") + return pc, errors.Wrap(err, "Error creating peer") } pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { - return pc, cmn.ErrorWrap(err, cerr.Error()) + return pc, errors.Wrap(err, cerr.Error()) } return pc, err } @@ -127,7 +128,7 @@ func testOutboundPeerConn( // ensure dialed ID matches connection ID if addr.ID != pc.ID() { if cerr := conn.Close(); cerr != nil { - return pc, cmn.ErrorWrap(err, cerr.Error()) + return pc, errors.Wrap(err, cerr.Error()) } return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()} } diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 27bcef9e8..a64eb28a5 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -784,12 +784,12 @@ func (a *addrBook) groupKey(na *p2p.NetAddress) string { } if na.RFC6145() || na.RFC6052() { // last four bytes are the ip address - ip := net.IP(na.IP[12:16]) + ip := na.IP[12:16] return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() } if na.RFC3964() { - ip := net.IP(na.IP[2:7]) + ip := na.IP[2:7] return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() } diff --git a/p2p/switch.go b/p2p/switch.go index 3681dd942..4898b80c9 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -222,7 +222,7 @@ func (sw *Switch) OnStart() error { for _, reactor := range sw.reactors { err := reactor.Start() if err != nil { - return cmn.ErrorWrap(err, "failed to start %v", 
reactor) + return errors.Wrapf(err, "failed to start %v", reactor) } } diff --git a/p2p/test_util.go b/p2p/test_util.go index fa175aeb4..a14073f99 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,6 +5,8 @@ import ( "net" "time" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" @@ -233,7 +235,7 @@ func testPeerConn( // Encrypt connection conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) if err != nil { - return pc, cmn.ErrorWrap(err, "Error creating peer") + return pc, errors.Wrap(err, "Error creating peer") } // Only the information we already have diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 16a537241..9adf72c6b 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -78,7 +78,7 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp // Wait for data receipt time.Sleep(1 * time.Second) - return + return supportsHairpin } func Probe(logger log.Logger) (caps UPNPCapabilities, err error) { diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index 89f35c5df..0be23ea6d 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -105,7 +105,7 @@ func Discover() (nat NAT, err error) { } } err = errors.New("UPnP port discovery failed") - return + return nat, err } type Envelope struct { @@ -241,7 +241,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) { // Extract the domain name, which isn't always 'schemas-upnp-org' urnDomain = strings.Split(d.ServiceType, ":")[1] url = combineURL(rootURL, d.ControlURL) - return + return url, urnDomain, err } func combineURL(rootURL, subURL string) string { @@ -285,7 +285,7 @@ func soapRequest(url, function, message, domain string) (r *http.Response, err e r = nil return } - return + return r, err } type statusInfo struct { @@ -322,7 +322,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { return } - return + return info, err } // GetExternalAddress returns an external IP. If GetExternalIPAddress action diff --git a/privval/doc.go b/privval/doc.go index 80869a6a7..ad60673b6 100644 --- a/privval/doc.go +++ b/privval/doc.go @@ -6,16 +6,16 @@ FilePV FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state. -SignerValidatorEndpoint +SignerListenerEndpoint -SignerValidatorEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. -SignerValidatorEndpoint listens for the external KMS process to dial in. -SignerValidatorEndpoint takes a listener, which determines the type of connection +SignerListenerEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. +SignerListenerEndpoint listens for the external KMS process to dial in. +SignerListenerEndpoint takes a listener, which determines the type of connection (ie. encrypted over tcp, or unencrypted over unix). -SignerServiceEndpoint +SignerDialerEndpoint -SignerServiceEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. +SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. */ package privval diff --git a/privval/errors.go b/privval/errors.go index 75fb25fc6..9f151f11d 100644 --- a/privval/errors.go +++ b/privval/errors.go @@ -4,10 +4,21 @@ import ( "fmt" ) +type EndpointTimeoutError struct{} + +// Implement the net.Error interface. 
+func (e EndpointTimeoutError) Error() string { return "endpoint connection timed out" }
+func (e EndpointTimeoutError) Timeout() bool { return true }
+func (e EndpointTimeoutError) Temporary() bool { return true }
+
 // Socket errors.
 var (
 ErrUnexpectedResponse = fmt.Errorf("received unexpected response")
- ErrConnTimeout = fmt.Errorf("remote signer timed out")
+ ErrNoConnection = fmt.Errorf("endpoint is not connected")
+ ErrConnectionTimeout = EndpointTimeoutError{}
+
+ ErrReadTimeout = fmt.Errorf("endpoint read timed out")
+ ErrWriteTimeout = fmt.Errorf("endpoint write timed out")
 )
 // RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
@@ -18,5 +29,5 @@ type RemoteSignerError struct {
 }
 func (e *RemoteSignerError) Error() string {
- return fmt.Sprintf("signerServiceEndpoint returned error #%d: %s", e.Code, e.Description)
+ return fmt.Sprintf("signerEndpoint returned error #%d: %s", e.Code, e.Description)
 }
diff --git a/privval/file_deprecated_test.go b/privval/file_deprecated_test.go
index 46391a3fe..e678bfc09 100644
--- a/privval/file_deprecated_test.go
+++ b/privval/file_deprecated_test.go
@@ -67,11 +67,11 @@ func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV
 }
 func initTmpOldFile(t *testing.T) string {
- tmpfile, err := ioutil.TempFile("", "priv_validator_*.json")
+ tmpFile, err := ioutil.TempFile("", "priv_validator_*.json")
 require.NoError(t, err)
- t.Logf("created test file %s", tmpfile.Name())
- _, err = tmpfile.WriteString(oldPrivvalContent)
+ t.Logf("created test file %s", tmpFile.Name())
+ _, err = tmpFile.WriteString(oldPrivvalContent)
 require.NoError(t, err)
- return tmpfile.Name()
+ return tmpFile.Name()
 }
diff --git a/privval/file_test.go b/privval/file_test.go
index 98de69480..38f6e6fe3 100644
--- a/privval/file_test.go
+++ b/privval/file_test.go
@@ -58,7 +58,7 @@ func TestResetValidator(t *testing.T) {
 // priv val after signing is not same as empty
 assert.NotEqual(t, privVal.LastSignState, emptyState)
- // priv val after reset is same as empty
+ // priv val after Reset is same as empty
 privVal.Reset()
 assert.Equal(t, privVal.LastSignState, emptyState)
 }
@@ -164,6 +164,7 @@ func TestSignVote(t *testing.T) {
 block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
 block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{}}
+
+ height, round := int64(10), 1
 voteType := byte(types.PrevoteType)
diff --git a/privval/messages.go b/privval/messages.go
index 6774a2795..c172a5ea1 100644
--- a/privval/messages.go
+++ b/privval/messages.go
@@ -6,56 +6,59 @@ import (
 "github.com/tendermint/tendermint/types"
 )
-// RemoteSignerMsg is sent between SignerServiceEndpoint and the SignerServiceEndpoint client.
-type RemoteSignerMsg interface{}
+// SignerMessage is sent between Signer Clients and Servers.
+type SignerMessage interface{}
 func RegisterRemoteSignerMsg(cdc *amino.Codec) {
- cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil)
+ cdc.RegisterInterface((*SignerMessage)(nil), nil)
 cdc.RegisterConcrete(&PubKeyRequest{}, "tendermint/remotesigner/PubKeyRequest", nil)
 cdc.RegisterConcrete(&PubKeyResponse{}, "tendermint/remotesigner/PubKeyResponse", nil)
 cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil)
 cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil)
 cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil)
 cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil)
+ cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil)
 cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil)
 }
+// TODO: Add ChainIDRequest
+
 // PubKeyRequest requests the consensus public key from the remote signer.
 type PubKeyRequest struct{}
-// PubKeyResponse is a PrivValidatorSocket message containing the public key.
+// PubKeyResponse is a response message containing the public key.
 type PubKeyResponse struct {
 PubKey crypto.PubKey
 Error *RemoteSignerError
 }
-// SignVoteRequest is a PrivValidatorSocket message containing a vote.
+// SignVoteRequest is a request to sign a vote
 type SignVoteRequest struct {
 Vote *types.Vote
 }
-// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message.
+// SignedVoteResponse is a response containing a signed vote or an error
 type SignedVoteResponse struct {
 Vote *types.Vote
 Error *RemoteSignerError
 }
-// SignProposalRequest is a PrivValidatorSocket message containing a Proposal.
+// SignProposalRequest is a request to sign a proposal
 type SignProposalRequest struct {
 Proposal *types.Proposal
 }
-// SignedProposalResponse is a PrivValidatorSocket message containing a proposal response
+// SignedProposalResponse is a response containing a signed proposal or an error
 type SignedProposalResponse struct {
 Proposal *types.Proposal
 Error *RemoteSignerError
 }
-// PingRequest is a PrivValidatorSocket message to keep the connection alive.
+// PingRequest is a request to confirm that the connection is alive.
 type PingRequest struct {
 }
-// PingRequest is a PrivValidatorSocket response to keep the connection alive.
+// PingResponse is a response to confirm that the connection is alive.
 type PingResponse struct {
 }
diff --git a/privval/signer_client.go b/privval/signer_client.go
new file mode 100644
index 000000000..0885ee4aa
--- /dev/null
+++ b/privval/signer_client.go
@@ -0,0 +1,131 @@
+package privval
+
+import (
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/tendermint/tendermint/crypto"
+ "github.com/tendermint/tendermint/types"
+)
+
+// SignerClient implements PrivValidator.
+// Handles remote validator connections that provide signing services
+type SignerClient struct {
+ endpoint *SignerListenerEndpoint
+}
+
+var _ types.PrivValidator = (*SignerClient)(nil)
+
+// NewSignerClient returns an instance of SignerClient.
+// It will start the endpoint (if not already started)
+func NewSignerClient(endpoint *SignerListenerEndpoint) (*SignerClient, error) {
+ if !endpoint.IsRunning() {
+ if err := endpoint.Start(); err != nil {
+ return nil, errors.Wrap(err, "failed to start listener endpoint")
+ }
+ }
+
+ return &SignerClient{endpoint: endpoint}, nil
+}
+
+// Close closes the underlying connection
+func (sc *SignerClient) Close() error {
+ return sc.endpoint.Close()
+}
+
+// IsConnected indicates whether the signer is connected to a remote signing service
+func (sc *SignerClient) IsConnected() bool {
+ return sc.endpoint.IsConnected()
+}
+
+// WaitForConnection waits maxWait for a connection or returns a timeout error
+func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error {
+ return sc.endpoint.WaitForConnection(maxWait)
+}
+
+//--------------------------------------------------------
+// Implement PrivValidator
+
+// Ping sends a ping request to the remote signer
+func (sc *SignerClient) Ping() error {
+ response, err := sc.endpoint.SendRequest(&PingRequest{})
+
+ if err != nil {
+ sc.endpoint.Logger.Error("SignerClient::Ping", "err", err)
+ return nil
+ }
+
+ _, ok := response.(*PingResponse)
+ if !ok {
+ sc.endpoint.Logger.Error("SignerClient::Ping", "err", "response != PingResponse")
+ return err
+ }
+
+ return nil
+}
+
+// GetPubKey retrieves a public key from a remote signer
+func (sc *SignerClient) GetPubKey() crypto.PubKey {
+ response, err := sc.endpoint.SendRequest(&PubKeyRequest{})
+ if err != nil {
+ sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", err)
+ return nil
+ }
+
+ pubKeyResp, ok := response.(*PubKeyResponse)
+ if !ok {
+ sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != PubKeyResponse")
+ return nil
+ }
+
+ if pubKeyResp.Error != nil {
+ sc.endpoint.Logger.Error("failed to get private validator's public key", "err", pubKeyResp.Error)
+ return nil
+ }
+
+ return pubKeyResp.PubKey
+}
+
+// SignVote requests a remote signer to sign a vote
+func (sc *SignerClient) SignVote(chainID string, vote *types.Vote) error {
+ response, err := sc.endpoint.SendRequest(&SignVoteRequest{Vote: vote})
+ if err != nil {
+ sc.endpoint.Logger.Error("SignerClient::SignVote", "err", err)
+ return err
+ }
+
+ resp, ok := response.(*SignedVoteResponse)
+ if !ok {
+ sc.endpoint.Logger.Error("SignerClient::SignVote", "err", "response != SignedVoteResponse")
+ return ErrUnexpectedResponse
+ }
+
+ if resp.Error != nil {
+ return resp.Error
+ }
+ *vote = *resp.Vote
+
+ return nil
+}
+
+// SignProposal requests a remote signer to sign a proposal
+func (sc *SignerClient) SignProposal(chainID string, proposal *types.Proposal) error {
+ response, err := sc.endpoint.SendRequest(&SignProposalRequest{Proposal: proposal})
+ if err != nil {
+ sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", err)
+ return err
+ }
+
+ resp, ok := response.(*SignedProposalResponse)
+ if !ok {
+ sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", "response != SignedProposalResponse")
+ return ErrUnexpectedResponse
+ }
+ if resp.Error != nil {
+ return resp.Error
+ }
+ *proposal = *resp.Proposal
+
+ return nil
+}
diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go
new file mode 100644
index 000000000..3d7cfb3e0
--- /dev/null
+++ b/privval/signer_client_test.go
@@ -0,0 +1,257 @@
+package privval
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +type signerTestCase struct { + chainID string + mockPV types.PrivValidator + signerClient *SignerClient + signerServer *SignerServer +} + +func getSignerTestCases(t *testing.T) []signerTestCase { + testCases := make([]signerTestCase, 0) + + // Get test cases for each possible dialer (DialTCP / DialUnix / etc) + for _, dtc := range getDialerTestCases(t) { + chainID := common.RandStr(12) + mockPV := types.NewMockPV() + + // get a pair of signer listener, signer dialer endpoints + sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(sl) + require.NoError(t, err) + ss := NewSignerServer(sd, chainID, mockPV) + + err = ss.Start() + require.NoError(t, err) + + tc := signerTestCase{ + chainID: chainID, + mockPV: mockPV, + signerClient: sc, + signerServer: ss, + } + + testCases = append(testCases, tc) + } + + return testCases +} + +func TestSignerClose(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + err := tc.signerClient.Close() + assert.NoError(t, err) + + err = tc.signerServer.Stop() + assert.NoError(t, err) + } +} + +func TestSignerPing(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + err := tc.signerClient.Ping() + assert.NoError(t, err) + } +} + +func TestSignerGetPubKey(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + pubKey := tc.signerClient.GetPubKey() + expectedPubKey := tc.mockPV.GetPubKey() + + assert.Equal(t, expectedPubKey, pubKey) + + addr := tc.signerClient.GetPubKey().Address() + expectedAddr := tc.mockPV.GetPubKey().Address() + + assert.Equal(t, expectedAddr, addr) + } +} + +func TestSignerProposal(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + want := &types.Proposal{Timestamp: ts} + have := &types.Proposal{Timestamp: ts} + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want)) + require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have)) + + assert.Equal(t, want.Signature, have.Signature) + } +} + +func TestSignerVote(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + + assert.Equal(t, want.Signature, have.Signature) + } +} + +func TestSignerVoteResetDeadline(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + time.Sleep(testTimeoutReadWrite2o3) + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + + // TODO(jleni): Clarify what is actually being tested + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(testTimeoutReadWrite2o3) + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) + require.NoError(t, 
tc.signerClient.SignVote(tc.chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + } +} + +func TestSignerVoteKeepAlive(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + // Check that even if the client does not request a + // signature for a long time. The service is still available + + // in this particular case, we use the dialer logger to ensure that + // test messages are properly interleaved in the test logs + tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------") + time.Sleep(testTimeoutReadWrite * 3) + tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + + assert.Equal(t, want.Signature, have.Signature) + } +} + +func TestSignerSignProposalErrors(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + // Replace service with a mock that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + ts := time.Now() + proposal := &types.Proposal{Timestamp: ts} + err := tc.signerClient.SignProposal(tc.chainID, proposal) + require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignProposal(tc.chainID, proposal) + require.Error(t, err) + + err = tc.signerClient.SignProposal(tc.chainID, proposal) + require.Error(t, err) + } +} + +func TestSignerSignVoteErrors(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + vote := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + // Replace signer service privval with one that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + err := tc.signerClient.SignVote(tc.chainID, vote) + require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignVote(tc.chainID, vote) + require.Error(t, err) + + err = tc.signerClient.SignVote(tc.chainID, vote) + require.Error(t, err) + } +} + +func brokenHandler(privVal types.PrivValidator, request SignerMessage, chainID string) (SignerMessage, error) { + var res SignerMessage + var err error + + switch r := request.(type) { + + // This is broken and will answer most requests with a pubkey response + case *PubKeyRequest: + res = &PubKeyResponse{nil, nil} + case *SignVoteRequest: + res = &PubKeyResponse{nil, nil} + case *SignProposalRequest: + res = &PubKeyResponse{nil, nil} + + case *PingRequest: + err, res = nil, &PingResponse{} + + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + return res, err +} + +func TestSignerUnexpectedResponse(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + tc.signerServer.privVal = types.NewMockPV() + tc.mockPV = types.NewMockPV() + + tc.signerServer.SetRequestHandler(brokenHandler) + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + e := tc.signerClient.SignVote(tc.chainID, want) + assert.EqualError(t, e, "received unexpected response") + } 
+} diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go new file mode 100644 index 000000000..95094c6d0 --- /dev/null +++ b/privval/signer_dialer_endpoint.go @@ -0,0 +1,84 @@ +package privval + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultMaxDialRetries = 10 + defaultRetryWaitMilliseconds = 100 +) + +// SignerServiceEndpointOption sets an optional parameter on the SignerDialerEndpoint. +type SignerServiceEndpointOption func(*SignerDialerEndpoint) + +// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for connections +// from external signing processes. +func SignerDialerEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption { + return func(ss *SignerDialerEndpoint) { ss.timeoutReadWrite = timeout } +} + +// SignerDialerEndpointConnRetries sets the amount of attempted retries to acceptNewConnection. +func SignerDialerEndpointConnRetries(retries int) SignerServiceEndpointOption { + return func(ss *SignerDialerEndpoint) { ss.maxConnRetries = retries } +} + +// SignerDialerEndpoint dials using its dialer and responds to any +// signature requests using its privVal. +type SignerDialerEndpoint struct { + signerEndpoint + + dialer SocketDialer + + retryWait time.Duration + maxConnRetries int +} + +// NewSignerDialerEndpoint returns a SignerDialerEndpoint that will dial using the given +// dialer and respond to any signature requests over the connection +// using the given privVal. +func NewSignerDialerEndpoint( + logger log.Logger, + dialer SocketDialer, +) *SignerDialerEndpoint { + + sd := &SignerDialerEndpoint{ + dialer: dialer, + retryWait: defaultRetryWaitMilliseconds * time.Millisecond, + maxConnRetries: defaultMaxDialRetries, + } + + sd.BaseService = *cmn.NewBaseService(logger, "SignerDialerEndpoint", sd) + sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + + return sd +} + +func (sd *SignerDialerEndpoint) ensureConnection() error { + if sd.IsConnected() { + return nil + } + + retries := 0 + for retries < sd.maxConnRetries { + conn, err := sd.dialer() + + if err != nil { + retries++ + sd.Logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) + // Wait between retries + time.Sleep(sd.retryWait) + } else { + sd.SetConnection(conn) + sd.Logger.Debug("SignerDialer: Connection Ready") + return nil + } + } + + sd.Logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) + + return ErrNoConnection +} diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go new file mode 100644 index 000000000..425f73fea --- /dev/null +++ b/privval/signer_endpoint.go @@ -0,0 +1,156 @@ +package privval + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/pkg/errors" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + defaultTimeoutReadWriteSeconds = 3 +) + +type signerEndpoint struct { + cmn.BaseService + + connMtx sync.Mutex + conn net.Conn + + timeoutReadWrite time.Duration +} + +// Close closes the underlying net.Conn. 
+func (se *signerEndpoint) Close() error { + se.DropConnection() + return nil +} + +// IsConnected indicates if there is an active connection +func (se *signerEndpoint) IsConnected() bool { + se.connMtx.Lock() + defer se.connMtx.Unlock() + return se.isConnected() +} + +// TryGetConnection retrieves a connection if it is already available +func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net.Conn) bool { + se.connMtx.Lock() + defer se.connMtx.Unlock() + + // Is there a connection ready? + select { + case se.conn = <-connectionAvailableCh: + return true + default: + } + return false +} + +// TryGetConnection retrieves a connection if it is already available +func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error { + se.connMtx.Lock() + defer se.connMtx.Unlock() + + select { + case se.conn = <-connectionAvailableCh: + case <-time.After(maxWait): + return ErrConnectionTimeout + } + + return nil +} + +// SetConnection replaces the current connection object +func (se *signerEndpoint) SetConnection(newConnection net.Conn) { + se.connMtx.Lock() + defer se.connMtx.Unlock() + se.conn = newConnection +} + +// IsConnected indicates if there is an active connection +func (se *signerEndpoint) DropConnection() { + se.connMtx.Lock() + defer se.connMtx.Unlock() + se.dropConnection() +} + +// ReadMessage reads a message from the endpoint +func (se *signerEndpoint) ReadMessage() (msg SignerMessage, err error) { + se.connMtx.Lock() + defer se.connMtx.Unlock() + + if !se.isConnected() { + return nil, fmt.Errorf("endpoint is not connected") + } + + // Reset read deadline + deadline := time.Now().Add(se.timeoutReadWrite) + + err = se.conn.SetReadDeadline(deadline) + if err != nil { + return + } + + const maxRemoteSignerMsgSize = 1024 * 10 + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(se.conn, &msg, maxRemoteSignerMsgSize) + if _, ok := err.(timeoutError); ok { + if err != nil { + err = errors.Wrap(ErrReadTimeout, err.Error()) + } else { + err = errors.Wrap(ErrReadTimeout, "Empty error") + } + se.Logger.Debug("Dropping [read]", "obj", se) + se.dropConnection() + } + + return +} + +// WriteMessage writes a message from the endpoint +func (se *signerEndpoint) WriteMessage(msg SignerMessage) (err error) { + se.connMtx.Lock() + defer se.connMtx.Unlock() + + if !se.isConnected() { + return errors.Wrap(ErrNoConnection, "endpoint is not connected") + } + + // Reset read deadline + deadline := time.Now().Add(se.timeoutReadWrite) + se.Logger.Debug("Write::Error Resetting deadline", "obj", se) + + err = se.conn.SetWriteDeadline(deadline) + if err != nil { + return + } + + _, err = cdc.MarshalBinaryLengthPrefixedWriter(se.conn, msg) + if _, ok := err.(timeoutError); ok { + if err != nil { + err = errors.Wrap(ErrWriteTimeout, err.Error()) + } else { + err = errors.Wrap(ErrWriteTimeout, "Empty error") + } + se.dropConnection() + } + + return +} + +func (se *signerEndpoint) isConnected() bool { + return se.conn != nil +} + +func (se *signerEndpoint) dropConnection() { + if se.conn != nil { + if err := se.conn.Close(); err != nil { + se.Logger.Error("signerEndpoint::dropConnection", "err", err) + } + se.conn = nil + } +} diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go new file mode 100644 index 000000000..e25f18756 --- /dev/null +++ b/privval/signer_listener_endpoint.go @@ -0,0 +1,198 @@ +package privval + +import ( + "fmt" + "net" + "sync" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + 
"github.com/tendermint/tendermint/libs/log" +) + +// SignerValidatorEndpointOption sets an optional parameter on the SocketVal. +type SignerValidatorEndpointOption func(*SignerListenerEndpoint) + +// SignerListenerEndpoint listens for an external process to dial in +// and keeps the connection alive by dropping and reconnecting +type SignerListenerEndpoint struct { + signerEndpoint + + listener net.Listener + connectRequestCh chan struct{} + connectionAvailableCh chan net.Conn + + timeoutAccept time.Duration + pingTimer *time.Ticker + + instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest +} + +// NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. +func NewSignerListenerEndpoint( + logger log.Logger, + listener net.Listener, +) *SignerListenerEndpoint { + sc := &SignerListenerEndpoint{ + listener: listener, + timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, + } + + sc.BaseService = *cmn.NewBaseService(logger, "SignerListenerEndpoint", sc) + sc.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + return sc +} + +// OnStart implements cmn.Service. +func (sl *SignerListenerEndpoint) OnStart() error { + sl.connectRequestCh = make(chan struct{}) + sl.connectionAvailableCh = make(chan net.Conn) + + sl.pingTimer = time.NewTicker(defaultPingPeriodMilliseconds * time.Millisecond) + + go sl.serviceLoop() + go sl.pingLoop() + + sl.connectRequestCh <- struct{}{} + + return nil +} + +// OnStop implements cmn.Service +func (sl *SignerListenerEndpoint) OnStop() { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + _ = sl.Close() + + // Stop listening + if sl.listener != nil { + if err := sl.listener.Close(); err != nil { + sl.Logger.Error("Closing Listener", "err", err) + sl.listener = nil + } + } + + sl.pingTimer.Stop() +} + +// WaitForConnection waits maxWait for a connection or returns a timeout error +func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + return sl.ensureConnection(maxWait) +} + +// SendRequest ensures there is a connection, sends a request and waits for a response +func (sl *SignerListenerEndpoint) SendRequest(request SignerMessage) (SignerMessage, error) { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + + err := sl.ensureConnection(sl.timeoutAccept) + if err != nil { + return nil, err + } + + err = sl.WriteMessage(request) + if err != nil { + return nil, err + } + + res, err := sl.ReadMessage() + if err != nil { + return nil, err + } + + return res, nil +} + +func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error { + if sl.IsConnected() { + return nil + } + + // Is there a connection ready? 
then use it + if sl.GetAvailableConnection(sl.connectionAvailableCh) { + return nil + } + + // block until connected or timeout + sl.triggerConnect() + err := sl.WaitConnection(sl.connectionAvailableCh, maxWait) + if err != nil { + return err + } + + return nil +} + +func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { + if !sl.IsRunning() || sl.listener == nil { + return nil, fmt.Errorf("endpoint is closing") + } + + // wait for a new conn + sl.Logger.Info("SignerListener: Listening for new connection") + conn, err := sl.listener.Accept() + if err != nil { + return nil, err + } + + return conn, nil +} + +func (sl *SignerListenerEndpoint) triggerConnect() { + select { + case sl.connectRequestCh <- struct{}{}: + default: + } +} + +func (sl *SignerListenerEndpoint) triggerReconnect() { + sl.DropConnection() + sl.triggerConnect() +} + +func (sl *SignerListenerEndpoint) serviceLoop() { + for { + select { + case <-sl.connectRequestCh: + { + conn, err := sl.acceptNewConnection() + if err == nil { + sl.Logger.Info("SignerListener: Connected") + + // We have a good connection, wait for someone that needs one otherwise cancellation + select { + case sl.connectionAvailableCh <- conn: + case <-sl.Quit(): + return + } + } + + select { + case sl.connectRequestCh <- struct{}{}: + default: + } + } + case <-sl.Quit(): + return + } + } +} + +func (sl *SignerListenerEndpoint) pingLoop() { + for { + select { + case <-sl.pingTimer.C: + { + _, err := sl.SendRequest(&PingRequest{}) + if err != nil { + sl.Logger.Error("SignerListener: Ping timeout") + sl.triggerReconnect() + } + } + case <-sl.Quit(): + return + } + } +} diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go new file mode 100644 index 000000000..7058ff8b8 --- /dev/null +++ b/privval/signer_listener_endpoint_test.go @@ -0,0 +1,198 @@ +package privval + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +var ( + testTimeoutAccept = defaultTimeoutAcceptSeconds * time.Second + + testTimeoutReadWrite = 100 * time.Millisecond + testTimeoutReadWrite2o3 = 60 * time.Millisecond // 2/3 of the other one +) + +type dialerTestCase struct { + addr string + dialer SocketDialer +} + +// TestSignerRemoteRetryTCPOnly will test connection retry attempts over TCP. We +// don't need this for Unix sockets because the OS instantly knows the state of +// both ends of the socket connection. This basically causes the +// SignerDialerEndpoint.dialer() call inside SignerDialerEndpoint.acceptNewConnection() to return +// successfully immediately, putting an instant stop to any retry attempts. 
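// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the listener-side counterpart of
// the SignerListenerEndpoint defined above. It accepts the remote signer's
// inbound connection and serializes request/response pairs through
// SendRequest, with pingLoop keeping the link alive. The listen address,
// timeout and use of a throwaway ed25519 key are illustrative assumptions.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	logger := log.TestingLogger()

	// Listen for the external signer process to dial in.
	ln, err := net.Listen("tcp", "127.0.0.1:26659")
	if err != nil {
		panic(err)
	}
	listener := privval.NewTCPListener(ln, ed25519.GenPrivKey())

	endpoint := privval.NewSignerListenerEndpoint(logger, listener)
	if err := endpoint.Start(); err != nil {
		panic(err)
	}
	defer endpoint.Stop()

	// Block until the remote signer connects, or give up after 30s.
	if err := endpoint.WaitForConnection(30 * time.Second); err != nil {
		panic(err)
	}

	// All request/response pairs share one connection and one mutex.
	resp, err := endpoint.SendRequest(&privval.PingRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("remote signer answered with %T\n", resp)
}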
+func TestSignerRemoteRetryTCPOnly(t *testing.T) { + var ( + attemptCh = make(chan int) + retries = 10 + ) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + // Continuously Accept connection and close {attempts} times + go func(ln net.Listener, attemptCh chan<- int) { + attempts := 0 + for { + conn, err := ln.Accept() + require.NoError(t, err) + + err = conn.Close() + require.NoError(t, err) + + attempts++ + + if attempts == retries { + attemptCh <- attempts + break + } + } + }(ln, attemptCh) + + dialerEndpoint := NewSignerDialerEndpoint( + log.TestingLogger(), + DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), + ) + SignerDialerEndpointTimeoutReadWrite(time.Millisecond)(dialerEndpoint) + SignerDialerEndpointConnRetries(retries)(dialerEndpoint) + + chainId := cmn.RandStr(12) + mockPV := types.NewMockPV() + signerServer := NewSignerServer(dialerEndpoint, chainId, mockPV) + + err = signerServer.Start() + require.NoError(t, err) + defer signerServer.Stop() + + select { + case attempts := <-attemptCh: + assert.Equal(t, retries, attempts) + case <-time.After(1500 * time.Millisecond): + t.Error("expected remote to observe connection attempts") + } +} + +func TestRetryConnToRemoteSigner(t *testing.T) { + for _, tc := range getDialerTestCases(t) { + var ( + logger = log.TestingLogger() + chainID = cmn.RandStr(12) + mockPV = types.NewMockPV() + endpointIsOpenCh = make(chan struct{}) + thisConnTimeout = testTimeoutReadWrite + listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) + ) + + dialerEndpoint := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) + SignerDialerEndpointConnRetries(10)(dialerEndpoint) + + signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) + + startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + defer listenerEndpoint.Stop() + + require.NoError(t, signerServer.Start()) + assert.True(t, signerServer.IsRunning()) + <-endpointIsOpenCh + signerServer.Stop() + + dialerEndpoint2 := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) + + // let some pings pass + require.NoError(t, signerServer2.Start()) + assert.True(t, signerServer2.IsRunning()) + defer signerServer2.Stop() + + // give the client some time to re-establish the conn to the remote signer + // should see sth like this in the logs: + // + // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" + // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal + time.Sleep(testTimeoutReadWrite * 2) + } +} + +/////////////////////////////////// + +func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { + proto, address := cmn.ProtocolAndAddress(addr) + + ln, err := net.Listen(proto, address) + logger.Info("SignerListener: Listening", "proto", proto, "address", address) + if err != nil { + panic(err) + } + + var listener net.Listener + + if proto == "unix" { + unixLn := NewUnixListener(ln) + UnixListenerTimeoutAccept(testTimeoutAccept)(unixLn) + UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn) + listener = unixLn + } else { + tcpLn := NewTCPListener(ln, ed25519.GenPrivKey()) + TCPListenerTimeoutAccept(testTimeoutAccept)(tcpLn) + TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn) + listener = tcpLn + } + + return NewSignerListenerEndpoint(logger, listener) +} + +func 
startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { + go func(sle *SignerListenerEndpoint) { + require.NoError(t, sle.Start()) + assert.True(t, sle.IsRunning()) + close(endpointIsOpenCh) + }(sle) +} + +func getMockEndpoints( + t *testing.T, + addr string, + socketDialer SocketDialer, +) (*SignerListenerEndpoint, *SignerDialerEndpoint) { + + var ( + logger = log.TestingLogger() + endpointIsOpenCh = make(chan struct{}) + + dialerEndpoint = NewSignerDialerEndpoint( + logger, + socketDialer, + ) + + listenerEndpoint = newSignerListenerEndpoint(logger, addr, testTimeoutReadWrite) + ) + + SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) + SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) + + startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + + require.NoError(t, dialerEndpoint.Start()) + assert.True(t, dialerEndpoint.IsRunning()) + + <-endpointIsOpenCh + + return listenerEndpoint, dialerEndpoint +} diff --git a/privval/signer_remote.go b/privval/signer_remote.go deleted file mode 100644 index 53b0cb773..000000000 --- a/privval/signer_remote.go +++ /dev/null @@ -1,192 +0,0 @@ -package privval - -import ( - "fmt" - "io" - "net" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/types" -) - -// SignerRemote implements PrivValidator. -// It uses a net.Conn to request signatures from an external process. -type SignerRemote struct { - conn net.Conn - - // memoized - consensusPubKey crypto.PubKey -} - -// Check that SignerRemote implements PrivValidator. -var _ types.PrivValidator = (*SignerRemote)(nil) - -// NewSignerRemote returns an instance of SignerRemote. -func NewSignerRemote(conn net.Conn) (*SignerRemote, error) { - - // retrieve and memoize the consensus public key once. - pubKey, err := getPubKey(conn) - if err != nil { - return nil, cmn.ErrorWrap(err, "error while retrieving public key for remote signer") - } - return &SignerRemote{ - conn: conn, - consensusPubKey: pubKey, - }, nil -} - -// Close calls Close on the underlying net.Conn. -func (sc *SignerRemote) Close() error { - return sc.conn.Close() -} - -// GetPubKey implements PrivValidator. -func (sc *SignerRemote) GetPubKey() crypto.PubKey { - return sc.consensusPubKey -} - -// not thread-safe (only called on startup). -func getPubKey(conn net.Conn) (crypto.PubKey, error) { - err := writeMsg(conn, &PubKeyRequest{}) - if err != nil { - return nil, err - } - - res, err := readMsg(conn) - if err != nil { - return nil, err - } - - pubKeyResp, ok := res.(*PubKeyResponse) - if !ok { - return nil, errors.Wrap(ErrUnexpectedResponse, "response is not PubKeyResponse") - } - - if pubKeyResp.Error != nil { - return nil, errors.Wrap(pubKeyResp.Error, "failed to get private validator's public key") - } - - return pubKeyResp.PubKey, nil -} - -// SignVote implements PrivValidator. -func (sc *SignerRemote) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - resp, ok := res.(*SignedVoteResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return resp.Error - } - *vote = *resp.Vote - - return nil -} - -// SignProposal implements PrivValidator. 
-func (sc *SignerRemote) SignProposal(chainID string, proposal *types.Proposal) error { - err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - resp, ok := res.(*SignedProposalResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return resp.Error - } - *proposal = *resp.Proposal - - return nil -} - -// Ping is used to check connection health. -func (sc *SignerRemote) Ping() error { - err := writeMsg(sc.conn, &PingRequest{}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - _, ok := res.(*PingResponse) - if !ok { - return ErrUnexpectedResponse - } - - return nil -} - -func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { - const maxRemoteSignerMsgSize = 1024 * 10 - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(r, &msg, maxRemoteSignerMsgSize) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func writeMsg(w io.Writer, msg interface{}) (err error) { - _, err = cdc.MarshalBinaryLengthPrefixedWriter(w, msg) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValidator) (RemoteSignerMsg, error) { - var res RemoteSignerMsg - var err error - - switch r := req.(type) { - case *PubKeyRequest: - var p crypto.PubKey - p = privVal.GetPubKey() - res = &PubKeyResponse{p, nil} - - case *SignVoteRequest: - err = privVal.SignVote(chainID, r.Vote) - if err != nil { - res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedVoteResponse{r.Vote, nil} - } - - case *SignProposalRequest: - err = privVal.SignProposal(chainID, r.Proposal) - if err != nil { - res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedProposalResponse{r.Proposal, nil} - } - - case *PingRequest: - res = &PingResponse{} - - default: - err = fmt.Errorf("unknown msg: %v", r) - } - - return res, err -} diff --git a/privval/signer_remote_test.go b/privval/signer_remote_test.go deleted file mode 100644 index 28230b803..000000000 --- a/privval/signer_remote_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package privval - -import ( - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -// TestSignerRemoteRetryTCPOnly will test connection retry attempts over TCP. We -// don't need this for Unix sockets because the OS instantly knows the state of -// both ends of the socket connection. This basically causes the -// SignerServiceEndpoint.dialer() call inside SignerServiceEndpoint.connect() to return -// successfully immediately, putting an instant stop to any retry attempts. 
-func TestSignerRemoteRetryTCPOnly(t *testing.T) { - var ( - attemptCh = make(chan int) - retries = 2 - ) - - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - go func(ln net.Listener, attemptCh chan<- int) { - attempts := 0 - - for { - conn, err := ln.Accept() - require.NoError(t, err) - - err = conn.Close() - require.NoError(t, err) - - attempts++ - - if attempts == retries { - attemptCh <- attempts - break - } - } - }(ln, attemptCh) - - serviceEndpoint := NewSignerServiceEndpoint( - log.TestingLogger(), - cmn.RandStr(12), - types.NewMockPV(), - DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), - ) - defer serviceEndpoint.Stop() - - SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint) - SignerServiceEndpointConnRetries(retries)(serviceEndpoint) - - assert.Equal(t, serviceEndpoint.Start(), ErrDialRetryMax) - - select { - case attempts := <-attemptCh: - assert.Equal(t, retries, attempts) - case <-time.After(100 * time.Millisecond): - t.Error("expected remote to observe connection attempts") - } -} diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go new file mode 100644 index 000000000..dcab7752e --- /dev/null +++ b/privval/signer_requestHandler.go @@ -0,0 +1,44 @@ +package privval + +import ( + "fmt" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" +) + +func DefaultValidationRequestHandler(privVal types.PrivValidator, req SignerMessage, chainID string) (SignerMessage, error) { + var res SignerMessage + var err error + + switch r := req.(type) { + case *PubKeyRequest: + var p crypto.PubKey + p = privVal.GetPubKey() + res = &PubKeyResponse{p, nil} + + case *SignVoteRequest: + err = privVal.SignVote(chainID, r.Vote) + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + + case *SignProposalRequest: + err = privVal.SignProposal(chainID, r.Proposal) + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + + case *PingRequest: + err, res = nil, &PingResponse{} + + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + return res, err +} diff --git a/privval/signer_server.go b/privval/signer_server.go new file mode 100644 index 000000000..62dcc461c --- /dev/null +++ b/privval/signer_server.go @@ -0,0 +1,107 @@ +package privval + +import ( + "io" + "sync" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +// ValidationRequestHandlerFunc handles different remoteSigner requests +type ValidationRequestHandlerFunc func( + privVal types.PrivValidator, + requestMessage SignerMessage, + chainID string) (SignerMessage, error) + +type SignerServer struct { + cmn.BaseService + + endpoint *SignerDialerEndpoint + chainID string + privVal types.PrivValidator + + handlerMtx sync.Mutex + validationRequestHandler ValidationRequestHandlerFunc +} + +func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal types.PrivValidator) *SignerServer { + ss := &SignerServer{ + endpoint: endpoint, + chainID: chainID, + privVal: privVal, + validationRequestHandler: DefaultValidationRequestHandler, + } + + ss.BaseService = *cmn.NewBaseService(endpoint.Logger, "SignerServer", ss) + + return ss +} + +// OnStart implements cmn.Service. 
+func (ss *SignerServer) OnStart() error { + go ss.serviceLoop() + return nil +} + +// OnStop implements cmn.Service. +func (ss *SignerServer) OnStop() { + ss.endpoint.Logger.Debug("SignerServer: OnStop calling Close") + _ = ss.endpoint.Close() +} + +// SetRequestHandler override the default function that is used to service requests +func (ss *SignerServer) SetRequestHandler(validationRequestHandler ValidationRequestHandlerFunc) { + ss.handlerMtx.Lock() + defer ss.handlerMtx.Unlock() + ss.validationRequestHandler = validationRequestHandler +} + +func (ss *SignerServer) servicePendingRequest() { + if !ss.IsRunning() { + return // Ignore error from closing. + } + + req, err := ss.endpoint.ReadMessage() + if err != nil { + if err != io.EOF { + ss.Logger.Error("SignerServer: HandleMessage", "err", err) + } + return + } + + var res SignerMessage + { + // limit the scope of the lock + ss.handlerMtx.Lock() + defer ss.handlerMtx.Unlock() + res, err = ss.validationRequestHandler(ss.privVal, req, ss.chainID) + if err != nil { + // only log the error; we'll reply with an error in res + ss.Logger.Error("SignerServer: handleMessage", "err", err) + } + } + + if res != nil { + err = ss.endpoint.WriteMessage(res) + if err != nil { + ss.Logger.Error("SignerServer: writeMessage", "err", err) + } + } +} + +func (ss *SignerServer) serviceLoop() { + for { + select { + default: + err := ss.endpoint.ensureConnection() + if err != nil { + return + } + ss.servicePendingRequest() + + case <-ss.Quit(): + return + } + } +} diff --git a/privval/signer_service_endpoint.go b/privval/signer_service_endpoint.go deleted file mode 100644 index 1b37d5fc6..000000000 --- a/privval/signer_service_endpoint.go +++ /dev/null @@ -1,139 +0,0 @@ -package privval - -import ( - "io" - "net" - "time" - - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -// SignerServiceEndpointOption sets an optional parameter on the SignerServiceEndpoint. -type SignerServiceEndpointOption func(*SignerServiceEndpoint) - -// SignerServiceEndpointTimeoutReadWrite sets the read and write timeout for connections -// from external signing processes. -func SignerServiceEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption { - return func(ss *SignerServiceEndpoint) { ss.timeoutReadWrite = timeout } -} - -// SignerServiceEndpointConnRetries sets the amount of attempted retries to connect. -func SignerServiceEndpointConnRetries(retries int) SignerServiceEndpointOption { - return func(ss *SignerServiceEndpoint) { ss.connRetries = retries } -} - -// SignerServiceEndpoint dials using its dialer and responds to any -// signature requests using its privVal. -type SignerServiceEndpoint struct { - cmn.BaseService - - chainID string - timeoutReadWrite time.Duration - connRetries int - privVal types.PrivValidator - - dialer SocketDialer - conn net.Conn -} - -// NewSignerServiceEndpoint returns a SignerServiceEndpoint that will dial using the given -// dialer and respond to any signature requests over the connection -// using the given privVal. 
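// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): SetRequestHandler, added above,
// lets callers replace DefaultValidationRequestHandler with any
// ValidationRequestHandlerFunc; the tests use it to inject a deliberately
// broken handler. A handler that only adds logging before delegating could
// look like the following (in-package code; loggingHandler is a hypothetical
// name, and the fmt, log and types imports are assumed).
// ---------------------------------------------------------------------------
// loggingHandler records each request type and chain ID, then delegates to
// the default handler for the actual signing work.
func loggingHandler(logger log.Logger) ValidationRequestHandlerFunc {
	return func(privVal types.PrivValidator, req SignerMessage, chainID string) (SignerMessage, error) {
		logger.Debug("remote signer request", "type", fmt.Sprintf("%T", req), "chainID", chainID)
		return DefaultValidationRequestHandler(privVal, req, chainID)
	}
}

// Usage, after constructing the server:
//
//	signerServer.SetRequestHandler(loggingHandler(logger))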
-func NewSignerServiceEndpoint( - logger log.Logger, - chainID string, - privVal types.PrivValidator, - dialer SocketDialer, -) *SignerServiceEndpoint { - se := &SignerServiceEndpoint{ - chainID: chainID, - timeoutReadWrite: time.Second * defaultTimeoutReadWriteSeconds, - connRetries: defaultMaxDialRetries, - privVal: privVal, - dialer: dialer, - } - - se.BaseService = *cmn.NewBaseService(logger, "SignerServiceEndpoint", se) - return se -} - -// OnStart implements cmn.Service. -func (se *SignerServiceEndpoint) OnStart() error { - conn, err := se.connect() - if err != nil { - se.Logger.Error("OnStart", "err", err) - return err - } - - se.conn = conn - go se.handleConnection(conn) - - return nil -} - -// OnStop implements cmn.Service. -func (se *SignerServiceEndpoint) OnStop() { - if se.conn == nil { - return - } - - if err := se.conn.Close(); err != nil { - se.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) - } -} - -func (se *SignerServiceEndpoint) connect() (net.Conn, error) { - for retries := 0; retries < se.connRetries; retries++ { - // Don't sleep if it is the first retry. - if retries > 0 { - time.Sleep(se.timeoutReadWrite) - } - - conn, err := se.dialer() - if err == nil { - return conn, nil - } - - se.Logger.Error("dialing", "err", err) - } - - return nil, ErrDialRetryMax -} - -func (se *SignerServiceEndpoint) handleConnection(conn net.Conn) { - for { - if !se.IsRunning() { - return // Ignore error from listener closing. - } - - // Reset the connection deadline - deadline := time.Now().Add(se.timeoutReadWrite) - err := conn.SetDeadline(deadline) - if err != nil { - return - } - - req, err := readMsg(conn) - if err != nil { - if err != io.EOF { - se.Logger.Error("handleConnection readMsg", "err", err) - } - return - } - - res, err := handleRequest(req, se.chainID, se.privVal) - - if err != nil { - // only log the error; we'll reply with an error in res - se.Logger.Error("handleConnection handleRequest", "err", err) - } - - err = writeMsg(conn, res) - if err != nil { - se.Logger.Error("handleConnection writeMsg", "err", err) - return - } - } -} diff --git a/privval/signer_validator_endpoint.go b/privval/signer_validator_endpoint.go deleted file mode 100644 index 6dc7f99d5..000000000 --- a/privval/signer_validator_endpoint.go +++ /dev/null @@ -1,230 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -const ( - defaultHeartbeatSeconds = 2 - defaultMaxDialRetries = 10 -) - -var ( - heartbeatPeriod = time.Second * defaultHeartbeatSeconds -) - -// SignerValidatorEndpointOption sets an optional parameter on the SocketVal. -type SignerValidatorEndpointOption func(*SignerValidatorEndpoint) - -// SignerValidatorEndpointSetHeartbeat sets the period on which to check the liveness of the -// connected Signer connections. -func SignerValidatorEndpointSetHeartbeat(period time.Duration) SignerValidatorEndpointOption { - return func(sc *SignerValidatorEndpoint) { sc.heartbeatPeriod = period } -} - -// SocketVal implements PrivValidator. -// It listens for an external process to dial in and uses -// the socket to request signatures. 
-type SignerValidatorEndpoint struct { - cmn.BaseService - - listener net.Listener - - // ping - cancelPingCh chan struct{} - pingTicker *time.Ticker - heartbeatPeriod time.Duration - - // signer is mutable since it can be reset if the connection fails. - // failures are detected by a background ping routine. - // All messages are request/response, so we hold the mutex - // so only one request/response pair can happen at a time. - // Methods on the underlying net.Conn itself are already goroutine safe. - mtx sync.Mutex - - // TODO: Signer should encapsulate and hide the endpoint completely. Invert the relation - signer *SignerRemote -} - -// Check that SignerValidatorEndpoint implements PrivValidator. -var _ types.PrivValidator = (*SignerValidatorEndpoint)(nil) - -// NewSignerValidatorEndpoint returns an instance of SignerValidatorEndpoint. -func NewSignerValidatorEndpoint(logger log.Logger, listener net.Listener) *SignerValidatorEndpoint { - sc := &SignerValidatorEndpoint{ - listener: listener, - heartbeatPeriod: heartbeatPeriod, - } - - sc.BaseService = *cmn.NewBaseService(logger, "SignerValidatorEndpoint", sc) - - return sc -} - -//-------------------------------------------------------- -// Implement PrivValidator - -// GetPubKey implements PrivValidator. -func (ve *SignerValidatorEndpoint) GetPubKey() crypto.PubKey { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.GetPubKey() -} - -// SignVote implements PrivValidator. -func (ve *SignerValidatorEndpoint) SignVote(chainID string, vote *types.Vote) error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.SignVote(chainID, vote) -} - -// SignProposal implements PrivValidator. -func (ve *SignerValidatorEndpoint) SignProposal(chainID string, proposal *types.Proposal) error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.SignProposal(chainID, proposal) -} - -//-------------------------------------------------------- -// More thread safe methods proxied to the signer - -// Ping is used to check connection health. -func (ve *SignerValidatorEndpoint) Ping() error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.Ping() -} - -// Close closes the underlying net.Conn. -func (ve *SignerValidatorEndpoint) Close() { - ve.mtx.Lock() - defer ve.mtx.Unlock() - if ve.signer != nil { - if err := ve.signer.Close(); err != nil { - ve.Logger.Error("OnStop", "err", err) - } - } - - if ve.listener != nil { - if err := ve.listener.Close(); err != nil { - ve.Logger.Error("OnStop", "err", err) - } - } -} - -//-------------------------------------------------------- -// Service start and stop - -// OnStart implements cmn.Service. 
-func (ve *SignerValidatorEndpoint) OnStart() error { - if closed, err := ve.reset(); err != nil { - ve.Logger.Error("OnStart", "err", err) - return err - } else if closed { - return fmt.Errorf("listener is closed") - } - - // Start a routine to keep the connection alive - ve.cancelPingCh = make(chan struct{}, 1) - ve.pingTicker = time.NewTicker(ve.heartbeatPeriod) - go func() { - for { - select { - case <-ve.pingTicker.C: - err := ve.Ping() - if err != nil { - ve.Logger.Error("Ping", "err", err) - if err == ErrUnexpectedResponse { - return - } - - closed, err := ve.reset() - if err != nil { - ve.Logger.Error("Reconnecting to remote signer failed", "err", err) - continue - } - if closed { - ve.Logger.Info("listener is closing") - return - } - - ve.Logger.Info("Re-created connection to remote signer", "impl", ve) - } - case <-ve.cancelPingCh: - ve.pingTicker.Stop() - return - } - } - }() - - return nil -} - -// OnStop implements cmn.Service. -func (ve *SignerValidatorEndpoint) OnStop() { - if ve.cancelPingCh != nil { - close(ve.cancelPingCh) - } - ve.Close() -} - -//-------------------------------------------------------- -// Connection and signer management - -// waits to accept and sets a new connection. -// connection is closed in OnStop. -// returns true if the listener is closed -// (ie. it returns a nil conn). -func (ve *SignerValidatorEndpoint) reset() (closed bool, err error) { - ve.mtx.Lock() - defer ve.mtx.Unlock() - - // first check if the conn already exists and close it. - if ve.signer != nil { - if tmpErr := ve.signer.Close(); tmpErr != nil { - ve.Logger.Error("error closing socket val connection during reset", "err", tmpErr) - } - } - - // wait for a new conn - conn, err := ve.acceptConnection() - if err != nil { - return false, err - } - - // listener is closed - if conn == nil { - return true, nil - } - - ve.signer, err = NewSignerRemote(conn) - if err != nil { - // failed to fetch the pubkey. close out the connection. - if tmpErr := conn.Close(); tmpErr != nil { - ve.Logger.Error("error closing connection", "err", tmpErr) - } - return false, err - } - return false, nil -} - -// Attempt to accept a connection. -// Times out after the listener's timeoutAccept -func (ve *SignerValidatorEndpoint) acceptConnection() (net.Conn, error) { - conn, err := ve.listener.Accept() - if err != nil { - if !ve.IsRunning() { - return nil, nil // Ignore error from listener closing. 
- } - return nil, err - } - return conn, nil -} diff --git a/privval/signer_validator_endpoint_test.go b/privval/signer_validator_endpoint_test.go deleted file mode 100644 index 611e743c9..000000000 --- a/privval/signer_validator_endpoint_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - - "github.com/tendermint/tendermint/types" -) - -var ( - testTimeoutAccept = defaultTimeoutAcceptSeconds * time.Second - - testTimeoutReadWrite = 100 * time.Millisecond - testTimeoutReadWrite2o3 = 66 * time.Millisecond // 2/3 of the other one - - testTimeoutHeartbeat = 10 * time.Millisecond - testTimeoutHeartbeat3o2 = 6 * time.Millisecond // 3/2 of the other one -) - -type socketTestCase struct { - addr string - dialer SocketDialer -} - -func socketTestCases(t *testing.T) []socketTestCase { - tcpAddr := fmt.Sprintf("tcp://%s", testFreeTCPAddr(t)) - unixFilePath, err := testUnixAddr() - require.NoError(t, err) - unixAddr := fmt.Sprintf("unix://%s", unixFilePath) - return []socketTestCase{ - { - addr: tcpAddr, - dialer: DialTCPFn(tcpAddr, testTimeoutReadWrite, ed25519.GenPrivKey()), - }, - { - addr: unixAddr, - dialer: DialUnixFn(unixFilePath), - }, - } -} - -func TestSocketPVAddress(t *testing.T) { - for _, tc := range socketTestCases(t) { - // Execute the test within a closure to ensure the deferred statements - // are called between each for loop iteration, for isolated test cases. - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer) - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - serviceAddr := serviceEndpoint.privVal.GetPubKey().Address() - validatorAddr := validatorEndpoint.GetPubKey().Address() - - assert.Equal(t, serviceAddr, validatorAddr) - }() - } -} - -func TestSocketPVPubKey(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - clientKey := validatorEndpoint.GetPubKey() - privvalPubKey := serviceEndpoint.privVal.GetPubKey() - - assert.Equal(t, privvalPubKey, clientKey) - }() - } -} - -func TestSocketPVProposal(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - privProposal = &types.Proposal{Timestamp: ts} - clientProposal = &types.Proposal{Timestamp: ts} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - require.NoError(t, serviceEndpoint.privVal.SignProposal(chainID, privProposal)) - require.NoError(t, validatorEndpoint.SignProposal(chainID, clientProposal)) - - assert.Equal(t, privProposal.Signature, clientProposal.Signature) - }() - } -} - -func TestSocketPVVote(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = 
types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVVoteResetDeadline(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - - // This would exceed the deadline if it was not extended by the previous message - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVVoteKeepalive(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - time.Sleep(testTimeoutReadWrite * 2) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVDeadline(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - listenc = make(chan struct{}) - thisConnTimeout = 100 * time.Millisecond - validatorEndpoint = newSignerValidatorEndpoint(log.TestingLogger(), tc.addr, thisConnTimeout) - ) - - go func(sc *SignerValidatorEndpoint) { - defer close(listenc) - - // Note: the TCP connection times out at the accept() phase, - // whereas the Unix domain sockets connection times out while - // attempting to fetch the remote signer's public key. 
- assert.True(t, IsConnTimeout(sc.Start())) - - assert.False(t, sc.IsRunning()) - }(validatorEndpoint) - - for { - _, err := cmn.Connect(tc.addr) - if err == nil { - break - } - } - - <-listenc - }() - } -} - -func TestRemoteSignVoteErrors(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewErroringMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - vote = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - err := validatorEndpoint.SignVote("", vote) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = serviceEndpoint.privVal.SignVote(chainID, vote) - require.Error(t, err) - err = validatorEndpoint.SignVote(chainID, vote) - require.Error(t, err) - }() - } -} - -func TestRemoteSignProposalErrors(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewErroringMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - proposal = &types.Proposal{Timestamp: ts} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - err := validatorEndpoint.SignProposal("", proposal) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = serviceEndpoint.privVal.SignProposal(chainID, proposal) - require.Error(t, err) - - err = validatorEndpoint.SignProposal(chainID, proposal) - require.Error(t, err) - }() - } -} - -func TestErrUnexpectedResponse(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - logger = log.TestingLogger() - chainID = cmn.RandStr(12) - readyCh = make(chan struct{}) - errCh = make(chan error, 1) - - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - - validatorEndpoint = newSignerValidatorEndpoint( - logger, - tc.addr, - testTimeoutReadWrite) - ) - - testStartEndpoint(t, readyCh, validatorEndpoint) - defer validatorEndpoint.Stop() - SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint) - SignerServiceEndpointConnRetries(100)(serviceEndpoint) - // we do not want to Start() the remote signer here and instead use the connection to - // reply with intentionally wrong replies below: - rsConn, err := serviceEndpoint.connect() - require.NoError(t, err) - require.NotNil(t, rsConn) - defer rsConn.Close() - - // send over public key to get the remote signer running: - go testReadWriteResponse(t, &PubKeyResponse{}, rsConn) - <-readyCh - - // Proposal: - go func(errc chan error) { - errc <- validatorEndpoint.SignProposal(chainID, &types.Proposal{}) - }(errCh) - - // read request and write wrong response: - go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) - err = <-errCh - require.Error(t, err) - require.Equal(t, err, ErrUnexpectedResponse) - - // Vote: - go func(errc chan error) { - errc <- validatorEndpoint.SignVote(chainID, &types.Vote{}) - }(errCh) - // read request and write wrong response: - go testReadWriteResponse(t, &SignedProposalResponse{}, rsConn) - err = <-errCh - require.Error(t, err) - require.Equal(t, err, ErrUnexpectedResponse) - }() - } -} - -func TestRetryConnToRemoteSigner(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - logger = log.TestingLogger() - chainID = cmn.RandStr(12) - 
readyCh = make(chan struct{}) - - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - thisConnTimeout = testTimeoutReadWrite - validatorEndpoint = newSignerValidatorEndpoint(logger, tc.addr, thisConnTimeout) - ) - // Ping every: - SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint) - - SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint) - SignerServiceEndpointConnRetries(10)(serviceEndpoint) - - testStartEndpoint(t, readyCh, validatorEndpoint) - defer validatorEndpoint.Stop() - require.NoError(t, serviceEndpoint.Start()) - assert.True(t, serviceEndpoint.IsRunning()) - - <-readyCh - time.Sleep(testTimeoutHeartbeat * 2) - - serviceEndpoint.Stop() - rs2 := NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - // let some pings pass - time.Sleep(testTimeoutHeartbeat3o2) - require.NoError(t, rs2.Start()) - assert.True(t, rs2.IsRunning()) - defer rs2.Stop() - - // give the client some time to re-establish the conn to the remote signer - // should see sth like this in the logs: - // - // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" - // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal - time.Sleep(testTimeoutReadWrite * 2) - }() - } -} - -func newSignerValidatorEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerValidatorEndpoint { - proto, address := cmn.ProtocolAndAddress(addr) - - ln, err := net.Listen(proto, address) - logger.Info("Listening at", "proto", proto, "address", address) - if err != nil { - panic(err) - } - - var listener net.Listener - - if proto == "unix" { - unixLn := NewUnixListener(ln) - UnixListenerTimeoutAccept(testTimeoutAccept)(unixLn) - UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn) - listener = unixLn - } else { - tcpLn := NewTCPListener(ln, ed25519.GenPrivKey()) - TCPListenerTimeoutAccept(testTimeoutAccept)(tcpLn) - TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn) - listener = tcpLn - } - - return NewSignerValidatorEndpoint(logger, listener) -} - -func testSetupSocketPair( - t *testing.T, - chainID string, - privValidator types.PrivValidator, - addr string, - socketDialer SocketDialer, -) (*SignerValidatorEndpoint, *SignerServiceEndpoint) { - var ( - logger = log.TestingLogger() - privVal = privValidator - readyc = make(chan struct{}) - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - privVal, - socketDialer, - ) - - thisConnTimeout = testTimeoutReadWrite - validatorEndpoint = newSignerValidatorEndpoint(logger, addr, thisConnTimeout) - ) - - SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint) - SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint) - SignerServiceEndpointConnRetries(1e6)(serviceEndpoint) - - testStartEndpoint(t, readyc, validatorEndpoint) - - require.NoError(t, serviceEndpoint.Start()) - assert.True(t, serviceEndpoint.IsRunning()) - - <-readyc - - return validatorEndpoint, serviceEndpoint -} - -func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) { - _, err := readMsg(rsConn) - require.NoError(t, err) - - err = writeMsg(rsConn, resp) - require.NoError(t, err) -} - -func testStartEndpoint(t *testing.T, readyCh chan struct{}, sc *SignerValidatorEndpoint) { - go func(sc *SignerValidatorEndpoint) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyCh <- struct{}{} - }(sc) -} - -// testFreeTCPAddr claims a free port so 
we don't block on listener being ready. -func testFreeTCPAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index 9d5d5cc2b..d7b372b85 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -1,26 +1,49 @@ package privval import ( + "fmt" "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" ) +func getDialerTestCases(t *testing.T) []dialerTestCase { + tcpAddr := GetFreeLocalhostAddrPort() + unixFilePath, err := testUnixAddr() + require.NoError(t, err) + unixAddr := fmt.Sprintf("unix://%s", unixFilePath) + + return []dialerTestCase{ + { + addr: tcpAddr, + dialer: DialTCPFn(tcpAddr, testTimeoutReadWrite, ed25519.GenPrivKey()), + }, + { + addr: unixAddr, + dialer: DialUnixFn(unixFilePath), + }, + } +} + func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { // Generate a networking timeout - dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey()) + tcpAddr := GetFreeLocalhostAddrPort() + dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) assert.True(t, IsConnTimeout(err)) } func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { - dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey()) + tcpAddr := GetFreeLocalhostAddrPort() + dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + err = errors.Wrap(ErrConnectionTimeout, err.Error()) assert.True(t, IsConnTimeout(err)) } diff --git a/privval/socket_listeners.go b/privval/socket_listeners.go index 7c8835791..f4d875e71 100644 --- a/privval/socket_listeners.go +++ b/privval/socket_listeners.go @@ -9,8 +9,8 @@ import ( ) const ( - defaultTimeoutAcceptSeconds = 3 - defaultTimeoutReadWriteSeconds = 3 + defaultTimeoutAcceptSeconds = 3 + defaultPingPeriodMilliseconds = 100 ) // timeoutError can be used to check if an error returned from the netp package diff --git a/privval/utils.go b/privval/utils.go index d8837bdf0..65368eb28 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -1,20 +1,62 @@ package privval import ( + "fmt" + "net" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) // IsConnTimeout returns a boolean indicating whether the error is known to // report that a connection timeout occurred. This detects both fundamental // network timeouts, as well as ErrConnTimeout errors. 
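// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): how the rewritten IsConnTimeout
// below is expected to classify errors, mirroring the updated
// socket_dialers_test.go and utils_test.go above -- wrapped endpoint timeouts
// and raw network timeouts count, anything else does not.
// classifyTimeoutExample is a hypothetical in-package helper; it assumes the
// fmt and github.com/pkg/errors imports.
// ---------------------------------------------------------------------------
func classifyTimeoutExample() {
	wrapped := errors.Wrap(ErrConnectionTimeout, "dial tcp 127.0.0.1:26659: i/o timeout")
	fmt.Println(IsConnTimeout(wrapped)) // true, per TestIsConnTimeoutForWrappedConnTimeouts above

	fmt.Println(IsConnTimeout(errors.New("completely irrelevant error"))) // false
}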
func IsConnTimeout(err error) bool { - if cmnErr, ok := err.(cmn.Error); ok { - if cmnErr.Data() == ErrConnTimeout { - return true - } - } - if _, ok := err.(timeoutError); ok { + switch errors.Cause(err).(type) { + case EndpointTimeoutError: + return true + case timeoutError: return true + default: + return false + } +} + +// NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address +func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { + var listener net.Listener + + protocol, address := cmn.ProtocolAndAddress(listenAddr) + ln, err := net.Listen(protocol, address) + if err != nil { + return nil, err + } + switch protocol { + case "unix": + listener = NewUnixListener(ln) + case "tcp": + // TODO: persist this key so external signer can actually authenticate us + listener = NewTCPListener(ln, ed25519.GenPrivKey()) + default: + return nil, fmt.Errorf( + "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", + protocol, + ) + } + + pve := NewSignerListenerEndpoint(logger.With("module", "privval"), listener) + + return pve, nil +} + +// GetFreeLocalhostAddrPort returns a free localhost:port address +func GetFreeLocalhostAddrPort() string { + port, err := cmn.GetFreePort() + if err != nil { + panic(err) } - return false + return fmt.Sprintf("127.0.0.1:%d", port) } diff --git a/privval/utils_test.go b/privval/utils_test.go index 23f6f6a3b..5648efec5 100644 --- a/privval/utils_test.go +++ b/privval/utils_test.go @@ -1,14 +1,13 @@ package privval import ( - "fmt" "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tendermint/libs/common" ) func TestIsConnTimeoutForNonTimeoutErrors(t *testing.T) { - assert.False(t, IsConnTimeout(cmn.ErrorWrap(ErrDialRetryMax, "max retries exceeded"))) - assert.False(t, IsConnTimeout(fmt.Errorf("completely irrelevant error"))) + assert.False(t, IsConnTimeout(errors.Wrap(ErrDialRetryMax, "max retries exceeded"))) + assert.False(t, IsConnTimeout(errors.New("completely irrelevant error"))) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index ca98f1be4..2004b0b89 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -147,7 +147,7 @@ func TestInfo(t *testing.T) { if err != nil { t.Errorf("Unexpected error: %v", err) } - if string(resInfo.Data) != "{\"size\":0}" { + if resInfo.Data != "{\"size\":0}" { t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") } } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 85f065b61..d1981e1ce 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -2,6 +2,7 @@ package client import ( "context" + "net/http" "strings" "sync" "time" @@ -84,8 +85,19 @@ var _ rpcClient = (*baseRPCClient)(nil) // NewHTTP takes a remote endpoint in the form ://: and // the websocket path (which always seems to be "/websocket") +// The function panics if the provided remote is invalid. func NewHTTP(remote, wsEndpoint string) *HTTP { - rc := rpcclient.NewJSONRPCClient(remote) + httpClient := rpcclient.DefaultHTTPClient(remote) + return NewHTTPWithClient(remote, wsEndpoint, httpClient) +} + +// NewHTTPWithClient allows for setting a custom http client. See NewHTTP +// The function panics if the provided client is nil or remote is invalid. 
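// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): NewSignerListener, added above in
// privval/utils.go, is the convenience constructor behind the listener sketch
// shown earlier: it takes a tcp:// or unix:// listen address and returns a
// SignerListenerEndpoint ready to Start. The socket path is an illustrative
// assumption.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	logger := log.TestingLogger()

	endpoint, err := privval.NewSignerListener("unix:///tmp/remote_signer.sock", logger)
	if err != nil {
		panic(err)
	}
	if err := endpoint.Start(); err != nil {
		panic(err)
	}
	defer endpoint.Stop()
}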
+func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) *HTTP { + if client == nil { + panic("nil http.Client provided") + } + rc := rpcclient.NewJSONRPCClientWithHTTPClient(remote, client) cdc := rc.Codec() ctypes.RegisterAmino(cdc) rc.SetCodec(cdc) @@ -453,6 +465,8 @@ func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error func (w *WSEvents) redoSubscriptionsAfter(d time.Duration) { time.Sleep(d) + w.mtx.RLock() + defer w.mtx.RUnlock() for q := range w.subscriptions { err := w.ws.Subscribe(context.Background(), q) if err != nil { diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 383e0b480..c4a7b023d 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -39,6 +39,7 @@ type Client interface { SignClient StatusClient EvidenceClient + MempoolClient } // ABCIClient groups together the functionality that principally affects the diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 3ec40d6cc..b7fdd602d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -37,6 +37,7 @@ type Client struct { client.StatusClient client.EventsClient client.EvidenceClient + client.MempoolClient cmn.Service } diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index de5e18f11..8bcbd313d 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -20,6 +20,7 @@ import ( "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) @@ -41,6 +42,23 @@ func GetClients() []client.Client { } } +func TestNilCustomHTTPClient(t *testing.T) { + require.Panics(t, func() { + client.NewHTTPWithClient("http://example.com", "/websocket", nil) + }) + require.Panics(t, func() { + rpcclient.NewJSONRPCClientWithHTTPClient("http://example.com", nil) + }) +} + +func TestCustomHTTPClient(t *testing.T) { + remote := rpctest.GetConfig().RPC.ListenAddress + c := client.NewHTTPWithClient(remote, "/websocket", http.DefaultClient) + status, err := c.Status() + require.NoError(t, err) + require.NotNil(t, status) +} + func TestCorsEnabled(t *testing.T) { origin := rpctest.GetConfig().RPC.CORSAllowedOrigins[0] remote := strings.Replace(rpctest.GetConfig().RPC.ListenAddress, "tcp", "http", -1) @@ -541,7 +559,7 @@ func makeEvidences(t *testing.T, val *privval.FilePV, chainID string) (ev types. 
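// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the new NewHTTPWithClient above
// lets callers supply their own *http.Client, for example to add a request
// timeout or a proxy-aware transport, while NewHTTP keeps the old behaviour
// by passing DefaultHTTPClient. The remote address and timeout are
// illustrative assumptions.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
	remote := "tcp://127.0.0.1:26657"

	// Start from the library's default client (custom dialer, compression
	// disabled) and add an overall per-request timeout on top.
	httpClient := rpcclient.DefaultHTTPClient(remote)
	httpClient.Timeout = 10 * time.Second

	c := client.NewHTTPWithClient(remote, "/websocket", httpClient)

	status, err := c.Status()
	if err != nil {
		panic(err)
	}
	fmt.Printf("node status: %+v\n", status)
}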
// exactly same vote vote2 = deepcpVote(vote) fakes[41] = newEvidence(t, val, vote, vote2, chainID) - return + return ev, fakes } func TestBroadcastEvidenceDuplicateVote(t *testing.T) { diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 1a0954438..ba1ed291d 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -346,7 +346,7 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmed // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") // err := client.Start() // if err != nil { -// // handle error +// // handle error // } // defer client.Stop() // result, err := client.UnconfirmedTxs() @@ -361,8 +361,8 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmed // "result" : { // "n_txs" : "0", // "total_bytes" : "0", -// "txs" : null, // "total" : "0" +// "txs" : null, // } // } // ``` diff --git a/rpc/core/routes.go b/rpc/core/routes.go index df7cef905..dbd9c6059 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -5,7 +5,7 @@ import ( ) // TODO: better system than "unsafe" prefix -// NOTE: Amino is registered in rpc/core/types/wire.go. +// NOTE: Amino is registered in rpc/core/types/codec.go. var Routes = map[string]*rpc.RPCFunc{ // subscribe/unsubscribe are reserved for websocket events. "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), diff --git a/rpc/core/tx.go b/rpc/core/tx.go index dba457c30..50a11fd45 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -106,7 +106,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error return &ctypes.ResultTx{ Hash: hash, Height: height, - Index: uint32(index), + Index: index, TxResult: r.Result, Tx: r.Tx, Proof: proof, diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index db57c536e..963da30b8 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -35,11 +35,41 @@ type HTTPClient interface { SetCodec(*amino.Codec) } -// TODO: Deprecate support for IP:PORT or /path/to/socket -func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (net.Conn, error)) { +// protocol - client's protocol (for example, "http", "https", "wss", "ws", "tcp") +// trimmedS - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") with "/" replaced with "." +func toClientAddrAndParse(remoteAddr string) (network string, trimmedS string, err error) { + protocol, address, err := parseRemoteAddr(remoteAddr) + if err != nil { + return "", "", err + } + // protocol to use for http operations, to support both http and https - clientProtocol := protoHTTP + var clientProtocol string + // default to http for unknown protocols (ex. tcp) + switch protocol { + case protoHTTP, protoHTTPS, protoWS, protoWSS: + clientProtocol = protocol + default: + clientProtocol = protoHTTP + } + // replace / with . 
for http requests (kvstore domain) + trimmedAddress := strings.Replace(address, "/", ".", -1) + return clientProtocol, trimmedAddress, nil +} + +func toClientAddress(remoteAddr string) (string, error) { + clientProtocol, trimmedAddress, err := toClientAddrAndParse(remoteAddr) + if err != nil { + return "", err + } + return clientProtocol + "://" + trimmedAddress, nil +} + +// network - name of the network (for example, "tcp", "unix") +// s - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") +// TODO: Deprecate support for IP:PORT or /path/to/socket +func parseRemoteAddr(remoteAddr string) (network string, s string, err error) { parts := strings.SplitN(remoteAddr, "://", 2) var protocol, address string switch { @@ -49,38 +79,44 @@ func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (ne case len(parts) == 2: protocol, address = parts[0], parts[1] default: - // return a invalid message - msg := fmt.Sprintf("Invalid addr: %s", remoteAddr) - return clientProtocol, msg, func(_ string, _ string) (net.Conn, error) { - return nil, errors.New(msg) - } + return "", "", fmt.Errorf("invalid addr: %s", remoteAddr) } - // accept http as an alias for tcp and set the client protocol + // accept http(s) as an alias for tcp switch protocol { case protoHTTP, protoHTTPS: - clientProtocol = protocol protocol = protoTCP - case protoWS, protoWSS: - clientProtocol = protocol } - // replace / with . for http requests (kvstore domain) - trimmedAddress := strings.Replace(address, "/", ".", -1) - return clientProtocol, trimmedAddress, func(proto, addr string) (net.Conn, error) { + return protocol, address, nil +} + +func makeErrorDialer(err error) func(string, string) (net.Conn, error) { + return func(_ string, _ string) (net.Conn, error) { + return nil, err + } +} + +func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) { + protocol, address, err := parseRemoteAddr(remoteAddr) + if err != nil { + return makeErrorDialer(err) + } + + return func(proto, addr string) (net.Conn, error) { return net.Dial(protocol, address) } } +// DefaultHTTPClient is used to create an http client with some default parameters. // We overwrite the http.Client.Dial so we can do http over tcp or unix. // remoteAddr should be fully featured (eg. with tcp:// or unix://) -func makeHTTPClient(remoteAddr string) (string, *http.Client) { - protocol, address, dialer := makeHTTPDialer(remoteAddr) - return protocol + "://" + address, &http.Client{ +func DefaultHTTPClient(remoteAddr string) *http.Client { + return &http.Client{ Transport: &http.Transport{ // Set to true to prevent GZIP-bomb DoS attacks DisableCompression: true, - Dial: dialer, + Dial: makeHTTPDialer(remoteAddr), }, } } @@ -124,9 +160,23 @@ var _ JSONRPCCaller = (*JSONRPCRequestBatch)(nil) // NewJSONRPCClient returns a JSONRPCClient pointed at the given address. func NewJSONRPCClient(remote string) *JSONRPCClient { - address, client := makeHTTPClient(remote) + return NewJSONRPCClientWithHTTPClient(remote, DefaultHTTPClient(remote)) +} + +// NewJSONRPCClientWithHTTPClient returns a JSONRPCClient pointed at the given address using a custom http client +// The function panics if the provided client is nil or remote is invalid. 
+func NewJSONRPCClientWithHTTPClient(remote string, client *http.Client) *JSONRPCClient { + if client == nil { + panic("nil http.Client provided") + } + + clientAddress, err := toClientAddress(remote) + if err != nil { + panic(fmt.Sprintf("invalid remote %s: %s", remote, err)) + } + return &JSONRPCClient{ - address: address, + address: clientAddress, client: client, id: types.JSONRPCStringID("jsonrpc-client-" + cmn.RandStr(8)), cdc: amino.NewCodec(), @@ -259,11 +309,15 @@ type URIClient struct { cdc *amino.Codec } +// The function panics if the provided remote is invalid. func NewURIClient(remote string) *URIClient { - address, client := makeHTTPClient(remote) + clientAddress, err := toClientAddress(remote) + if err != nil { + panic(fmt.Sprintf("invalid remote %s: %s", remote, err)) + } return &URIClient{ - address: address, - client: client, + address: clientAddress, + client: DefaultHTTPClient(remote), cdc: amino.NewCodec(), } } @@ -340,6 +394,7 @@ func unmarshalResponseBytesArray(cdc *amino.Codec, responseBytes []byte, expecte } for i, response := range responses { + response := response // From the JSON-RPC 2.0 spec: // id: It MUST be the same as the value of the id member in the Request Object. if err := validateResponseID(&response, expectedID); err != nil { diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 05180c753..1779e9dbd 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -78,8 +78,12 @@ type WSClient struct { // NewWSClient returns a new client. See the commentary on the func(*WSClient) // functions for a detailed description of how to configure ping period and // pong wait time. The endpoint argument must begin with a `/`. +// The function panics if the provided address is invalid. 
func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient { - protocol, addr, dialer := makeHTTPDialer(remoteAddr) + protocol, addr, err := toClientAddrAndParse(remoteAddr) + if err != nil { + panic(fmt.Sprintf("invalid remote %s: %s", remoteAddr, err)) + } // default to ws protocol, unless wss is explicitly specified if protocol != "wss" { protocol = "ws" @@ -88,7 +92,7 @@ func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSCli c := &WSClient{ cdc: amino.NewCodec(), Address: addr, - Dialer: dialer, + Dialer: makeHTTPDialer(remoteAddr), Endpoint: endpoint, PingPongLatencyTimer: metrics.NewTimer(), diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 3fa4de47f..9af5728a8 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -33,6 +33,8 @@ const ( unixAddr = "unix://" + unixSocket websocketEndpoint = "/websocket/endpoint" + + testVal = "acbd" ) type ResultEcho struct { @@ -189,7 +191,7 @@ func echoDataBytesViaHTTP(cl client.HTTPClient, bytes cmn.HexBytes) (cmn.HexByte } func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { - val := "acbd" + val := testVal got, err := echoViaHTTP(cl, val) require.Nil(t, err) assert.Equal(t, got, val) @@ -255,7 +257,7 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { } func testWithWSClient(t *testing.T, cl *client.WSClient) { - val := "acbd" + val := testVal got, err := echoViaWS(cl, val) require.Nil(t, err) assert.Equal(t, got, val) @@ -314,7 +316,7 @@ func TestWSNewWSRPCFunc(t *testing.T) { require.Nil(t, err) defer cl.Stop() - val := "acbd" + val := testVal params := map[string]interface{}{ "arg": val, } @@ -339,7 +341,7 @@ func TestWSHandlesArrayParams(t *testing.T) { require.Nil(t, err) defer cl.Stop() - val := "acbd" + val := testVal params := []interface{}{val} err = cl.CallWithArrayParams(context.Background(), "echo_ws", params) require.Nil(t, err) diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 5b5c9f8b9..cb7346769 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -129,6 +129,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo } for _, request := range requests { + request := request // A Notification is a Request object without an "id" member. // The Server MUST NOT reply to a Notification, including those that are within a batch request. 
if request.ID == types.JSONRPCStringID("") { @@ -376,9 +377,9 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect rv, err := jsonStringToArg(cdc, rt, qarg) if err != nil { return rv, err, false - } else { - return rv, nil, true } + + return rv, nil, true } if isHexString { @@ -396,7 +397,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if rt.Kind() == reflect.String { return reflect.ValueOf(string(value)), nil, true } - return reflect.ValueOf([]byte(value)), nil, true + return reflect.ValueOf(value), nil, true } if isQuotedString && expectingByteSlice { diff --git a/rpc/lib/types/types_test.go b/rpc/lib/types/types_test.go index a5b2da9ce..b57211a96 100644 --- a/rpc/lib/types/types_test.go +++ b/rpc/lib/types/types_test.go @@ -39,17 +39,17 @@ func TestResponses(t *testing.T) { a := NewRPCSuccessResponse(cdc, jsonid, &SampleResult{"hello"}) b, _ := json.Marshal(a) s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) - assert.Equal(string(s), string(b)) + assert.Equal(s, string(b)) d := RPCParseError(jsonid, errors.New("Hello world")) e, _ := json.Marshal(d) f := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}`, tt.expected) - assert.Equal(string(f), string(e)) + assert.Equal(f, string(e)) g := RPCMethodNotFoundError(jsonid) h, _ := json.Marshal(g) i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) - assert.Equal(string(h), string(i)) + assert.Equal(string(h), i) } } diff --git a/scripts/devtools/Makefile b/scripts/devtools/Makefile new file mode 100644 index 000000000..1ca24bf9d --- /dev/null +++ b/scripts/devtools/Makefile @@ -0,0 +1,92 @@ +### +# Find OS and Go environment +# GO contains the Go binary +# FS contains the OS file separator +### +ifeq ($(OS),Windows_NT) + GO := $(shell where go.exe 2> NUL) + FS := "\\" +else + GO := $(shell command -v go 2> /dev/null) + FS := "/" +endif + +ifeq ($(GO),) + $(error could not find go. Is it in PATH? $(GO)) +endif + +GOPATH ?= $(shell $(GO) env GOPATH) +GITHUBDIR := $(GOPATH)$(FS)src$(FS)github.com + +### +# Functions +### + +go_get = $(if $(findstring Windows_NT,$(OS)),\ +IF NOT EXIST $(GITHUBDIR)$(FS)$(1)$(FS) ( mkdir $(GITHUBDIR)$(FS)$(1) ) else (cd .) &\ +IF NOT EXIST $(GITHUBDIR)$(FS)$(1)$(FS)$(2)$(FS) ( cd $(GITHUBDIR)$(FS)$(1) && git clone https://github.com/$(1)/$(2) ) else (cd .) &\ +,\ +mkdir -p $(GITHUBDIR)$(FS)$(1) &&\ +(test ! 
-d $(GITHUBDIR)$(FS)$(1)$(FS)$(2) && cd $(GITHUBDIR)$(FS)$(1) && git clone https://github.com/$(1)/$(2)) || true &&\ +)\ +cd $(GITHUBDIR)$(FS)$(1)$(FS)$(2) && git fetch origin && git checkout -q $(3) + +mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) +mkfile_dir := $(shell cd $(shell dirname $(mkfile_path)); pwd) + +### +# tools +### + +TOOLS_DESTDIR ?= $(GOPATH)/bin + +GOIMPORTS = $(TOOLS_DESTDIR)/goimports +CERTSTRAP = $(TOOLS_DESTDIR)/certstrap +PROTOBUF = $(TOOLS_DESTDIR)/protoc +GOX = $(TOOLS_DESTDIR)/gox +GOODMAN = $(TOOLS_DESTDIR)/goodman + +all: tools + +tools: goimports certstrap protobuf gox goodman + +check: check_tools + +check_tools: + @# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" + +goimports: $(GOIMPORTS) +$(GOIMPORTS): + @echo "Get goimports@v0.0.0-20190628034336-212fb13d595e" + @go get golang.org/x/tools/cmd/goimports@v0.0.0-20190628034336-212fb13d595e + +certstrap: $(CERTSTRAP) +$(CERTSTRAP): + @echo "Get Certstrap" + @go get github.com/square/certstrap@338204a88c4349b1c135eac1e8c14c693ad007da + +protobuf: $(PROTOBUF) +$(PROTOBUF): + @echo "Get Protobuf" + ## protobuf v1.3.0 + @go get github.com/gogo/protobuf/protoc-gen-gogo@0ca988a254f991240804bf9821f3450d87ccbb1b + +gox: $(GOX) +$(GOX): + @echo "Get Gox" +# used to build tm-monitor & tm-bench binaries + ## gox v1.0.1 + @go get github.com/mitchellh/gox@d8caaff5a9dc98f4cfa1fcce6e7265a04689f641 + +goodman: $(GOODMAN) +$(GOODMAN): + @echo "Get Goodman" + @go get github.com/snikch/goodman/cmd/goodman@10e37e294daa3c9a90abded60ff9924bafab3888 + +tools-clean: + rm -f $(CERTSTRAP) $(GOIMPORTS) $(PROTOBUF) $(GOX) $(GOODMAN) + rm -f tools-stamp + +.PHONY: all tools tools-clean diff --git a/scripts/dist.sh b/scripts/dist.sh index ac62f1099..81fdf9813 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -29,7 +29,7 @@ XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} XC_EXCLUDE=${XC_EXCLUDE:-" darwin/arm solaris/amd64 solaris/386 solaris/arm freebsd/amd64 windows/arm "} # Make sure build tools are available. -make get_tools +make tools # Build! # ldflags: -s Omit the symbol table and debug information. diff --git a/scripts/get_nodejs.sh b/scripts/get_nodejs.sh new file mode 100755 index 000000000..59469cc50 --- /dev/null +++ b/scripts/get_nodejs.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +VERSION=v12.9.0 +NODE_FULL=node-${VERSION}-linux-x64 + +mkdir -p ~/.local/bin +mkdir -p ~/.local/node +wget http://nodejs.org/dist/${VERSION}/${NODE_FULL}.tar.gz -O ~/.local/node/${NODE_FULL}.tar.gz +tar -xzf ~/.local/node/${NODE_FULL}.tar.gz -C ~/.local/node/ +ln -s ~/.local/node/${NODE_FULL}/bin/node ~/.local/bin/node +ln -s ~/.local/node/${NODE_FULL}/bin/npm ~/.local/bin/npm +export PATH=~/.local/bin:$PATH +npm i -g dredd +ln -s ~/.local/node/${NODE_FULL}/bin/dredd ~/.local/bin/dredd diff --git a/scripts/get_tools.sh b/scripts/get_tools.sh deleted file mode 100755 index d8c17df11..000000000 --- a/scripts/get_tools.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -set -e - -# This file downloads all of the binary dependencies we have, and checks out a -# specific git hash. 
-# -# repos it installs: -# github.com/golang/dep/cmd/dep -# github.com/gogo/protobuf/protoc-gen-gogo -# github.com/square/certstrap -# github.com/mitchellh/gox -# github.com/golangci/golangci-lint -# github.com/petermattis/goid -# github.com/sasha-s/go-deadlock -# goimports - -## check if GOPATH is set -if [ -z ${GOPATH+x} ]; then - echo "please set GOPATH (https://github.com/golang/go/wiki/SettingGOPATH)" - exit 1 -fi - -mkdir -p "$GOPATH/src/github.com" -cd "$GOPATH/src/github.com" || exit 1 - -installFromGithub() { - repo=$1 - commit=$2 - # optional - subdir=$3 - echo "--> Installing $repo ($commit)..." - if [ ! -d "$repo" ]; then - mkdir -p "$repo" - git clone "https://github.com/$repo.git" "$repo" - fi - if [ ! -z ${subdir+x} ] && [ ! -d "$repo/$subdir" ]; then - echo "ERROR: no such directory $repo/$subdir" - exit 1 - fi - pushd "$repo" && \ - git fetch origin && \ - git checkout -q "$commit" && \ - if [ ! -z ${subdir+x} ]; then cd "$subdir" || exit 1; fi && \ - go install && \ - if [ ! -z ${subdir+x} ]; then cd - || exit 1; fi && \ - popd || exit 1 - echo "--> Done" - echo "" -} - -######################## DEVELOPER TOOLS ##################################### -installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo - -installFromGithub square/certstrap e27060a3643e814151e65b9807b6b06d169580a7 - -# used to build tm-monitor & tm-bench binaries -installFromGithub mitchellh/gox 51ed453898ca5579fea9ad1f08dff6b121d9f2e8 - -## golangci-lint v1.13.2 -installFromGithub golangci/golangci-lint 7b2421d55194c9dc385eff7720a037aa9244ca3c cmd/golangci-lint - -## make test_with_deadlock -## XXX: https://github.com/tendermint/tendermint/issues/3242 -installFromGithub petermattis/goid b0b1615b78e5ee59739545bb38426383b2cda4c9 -installFromGithub sasha-s/go-deadlock d68e2bc52ae3291765881b9056f2c1527f245f1e -go get golang.org/x/tools/cmd/goimports diff --git a/scripts/gitian-build.sh b/scripts/gitian-build.sh index a7a6acec3..fbe475015 100755 --- a/scripts/gitian-build.sh +++ b/scripts/gitian-build.sh @@ -8,7 +8,7 @@ set -euo pipefail GITIAN_CACHE_DIRNAME='.gitian-builder-cache' -GO_DEBIAN_RELEASE='1.12.5-1' +GO_DEBIAN_RELEASE='1.12.8-1' GO_TARBALL="golang-debian-${GO_DEBIAN_RELEASE}.tar.gz" GO_TARBALL_URL="https://salsa.debian.org/go-team/compiler/golang/-/archive/debian/${GO_DEBIAN_RELEASE}/${GO_TARBALL}" diff --git a/scripts/gitian-descriptors/gitian-darwin.yml b/scripts/gitian-descriptors/gitian-darwin.yml index 03ba1f1a4..58b4f0cb8 100644 --- a/scripts/gitian-descriptors/gitian-darwin.yml +++ b/scripts/gitian-descriptors/gitian-darwin.yml @@ -23,11 +23,11 @@ remotes: - "url": "https://github.com/tendermint/tendermint.git" "dir": "tendermint" files: -- "golang-debian-1.12.5-1.tar.gz" +- "golang-debian-1.12.8-1.tar.gz" script: | set -e -o pipefail - GO_SRC_RELEASE=golang-debian-1.12.5-1 + GO_SRC_RELEASE=golang-debian-1.12.8-1 GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" # Compile go and configure the environment export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" diff --git a/scripts/gitian-descriptors/gitian-linux.yml b/scripts/gitian-descriptors/gitian-linux.yml index f1c31c40e..6969d41d7 100644 --- a/scripts/gitian-descriptors/gitian-linux.yml +++ b/scripts/gitian-descriptors/gitian-linux.yml @@ -23,11 +23,11 @@ remotes: - "url": "https://github.com/tendermint/tendermint.git" "dir": "tendermint" files: -- "golang-debian-1.12.5-1.tar.gz" +- "golang-debian-1.12.8-1.tar.gz" script: | set -e -o pipefail - GO_SRC_RELEASE=golang-debian-1.12.5-1 + 
GO_SRC_RELEASE=golang-debian-1.12.8-1 GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" # Compile go and configure the environment export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" diff --git a/scripts/gitian-descriptors/gitian-windows.yml b/scripts/gitian-descriptors/gitian-windows.yml index 80b2e60d3..3215e7814 100644 --- a/scripts/gitian-descriptors/gitian-windows.yml +++ b/scripts/gitian-descriptors/gitian-windows.yml @@ -23,11 +23,11 @@ remotes: - "url": "https://github.com/tendermint/tendermint.git" "dir": "tendermint" files: -- "golang-debian-1.12.5-1.tar.gz" +- "golang-debian-1.12.8-1.tar.gz" script: | set -e -o pipefail - GO_SRC_RELEASE=golang-debian-1.12.5-1 + GO_SRC_RELEASE=golang-debian-1.12.8-1 GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" # Compile go and configure the environment export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" diff --git a/scripts/install-golangci-lint.sh b/scripts/install-golangci-lint.sh new file mode 100644 index 000000000..b95713828 --- /dev/null +++ b/scripts/install-golangci-lint.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -euo pipefail + +f_sha256() { + local l_file + l_file=$1 + python -sBc "import hashlib;print(hashlib.sha256(open('$l_file','rb').read()).hexdigest())" +} + +installer="$(mktemp)" +trap "rm -f ${installer}" EXIT + +GOBIN="${1}" +VERSION="${2}" +HASHSUM="${3}" +CURL="$(which curl)" + +echo "Downloading golangci-lint ${VERSION} installer ..." >&2 +"${CURL}" -sfL "https://raw.githubusercontent.com/golangci/golangci-lint/${VERSION}/install.sh" > "${installer}" + +echo "Checking hashsum ..." >&2 +[ "${HASHSUM}" = "$(f_sha256 ${installer})" ] +chmod +x "${installer}" + +echo "Launching installer ..." >&2 +exec "${installer}" -d -b "${GOBIN}" "${VERSION}" diff --git a/scripts/install/install_tendermint_arm.sh b/scripts/install/install_tendermint_arm.sh index 085ba82f4..cd2e18c9f 100644 --- a/scripts/install/install_tendermint_arm.sh +++ b/scripts/install/install_tendermint_arm.sh @@ -31,7 +31,7 @@ cd "$GOPATH/src/$REPO" git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH -make get_tools +make tools make install # the binary is located in $GOPATH/bin diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh index 294155d0e..c69e6269c 100644 --- a/scripts/install/install_tendermint_bsd.sh +++ b/scripts/install/install_tendermint_bsd.sh @@ -46,7 +46,7 @@ cd "$GOPATH/src/$REPO" # build & install master git checkout $BRANCH -gmake get_tools +gmake tools gmake install # the binary is located in $GOPATH/bin diff --git a/scripts/install/install_tendermint_osx.sh b/scripts/install/install_tendermint_osx.sh index ee799f66a..be2c4d0ec 100644 --- a/scripts/install/install_tendermint_osx.sh +++ b/scripts/install/install_tendermint_osx.sh @@ -36,5 +36,5 @@ cd $GOPATH/src/$REPO git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH -make get_tools +make tools make install diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index 2e5558ff6..b562498f6 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -40,7 +40,7 @@ cd "$GOPATH/src/$REPO" git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH -make get_tools +make tools make install # the binary is located in $GOPATH/bin diff --git a/scripts/privValUpgrade_test.go b/scripts/privValUpgrade_test.go index bac4d315f..7caf38798 100644 --- 
a/scripts/privValUpgrade_test.go +++ b/scripts/privValUpgrade_test.go @@ -75,6 +75,7 @@ func TestLoadAndUpgrade(t *testing.T) { }, } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { // need to re-write the file everytime because upgrading renames it err := ioutil.WriteFile(oldFilePath, []byte(oldPrivvalContent), 0600) diff --git a/state/execution_test.go b/state/execution_test.go index 38301df73..02d13b353 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -210,6 +210,7 @@ func TestValidateValidatorUpdates(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { err := sm.ValidateValidatorUpdates(tc.abciUpdates, tc.validatorParams) if tc.shouldErr { @@ -275,6 +276,7 @@ func TestUpdateValidators(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates) assert.NoError(t, err) diff --git a/state/state_test.go b/state/state_test.go index 062e62bb5..bd4935ea8 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -645,7 +645,7 @@ func TestLargeGenesisValidator(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - genesisVotingPower := int64(types.MaxTotalVotingPower / 1000) + genesisVotingPower := types.MaxTotalVotingPower / 1000 genesisPubKey := ed25519.GenPrivKey().PubKey() // fmt.Println("genesis addr: ", genesisPubKey.Address()) genesisVal := &types.Validator{Address: genesisPubKey.Address(), PubKey: genesisPubKey, VotingPower: genesisVotingPower} diff --git a/state/store_test.go b/state/store_test.go index 4549e8f89..0a190446e 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -70,6 +70,7 @@ func BenchmarkLoadValidators(b *testing.B) { sm.SaveState(stateDB, state) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... + i := i sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index a0c833e49..189a9da61 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -118,6 +118,7 @@ func TestTxSearch(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.q, func(t *testing.T) { results, err := indexer.Search(query.MustParse(tc.q)) assert.NoError(t, err) @@ -191,6 +192,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.q, func(t *testing.T) { results, err := indexer.Search(query.MustParse(tc.q)) require.NoError(t, err) diff --git a/store/codec.go b/store/codec.go index 67b838c01..4895e8994 100644 --- a/store/codec.go +++ b/store/codec.go @@ -2,11 +2,11 @@ package store import ( amino "github.com/tendermint/go-amino" - cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" + "github.com/tendermint/tendermint/types" ) var cdc = amino.NewCodec() func init() { - cryptoAmino.RegisterAmino(cdc) + types.RegisterBlockAmino(cdc) } diff --git a/store/store.go b/store/store.go index 73c9ad010..c16d5efec 100644 --- a/store/store.go +++ b/store/store.go @@ -4,7 +4,8 @@ import ( "fmt" "sync" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" + dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/types" @@ -67,7 +68,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { if err != nil { // NOTE: The existence of meta should imply the existence of the // block. 
So, make sure meta is only saved after blocks are saved. - panic(cmn.ErrorWrap(err, "Error reading block")) + panic(errors.Wrap(err, "Error reading block")) } return block } @@ -83,7 +84,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { } err := cdc.UnmarshalBinaryBare(bz, part) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block part")) + panic(errors.Wrap(err, "Error reading block part")) } return part } @@ -98,7 +99,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } err := cdc.UnmarshalBinaryBare(bz, blockMeta) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block meta")) + panic(errors.Wrap(err, "Error reading block meta")) } return blockMeta } @@ -115,7 +116,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { } err := cdc.UnmarshalBinaryBare(bz, commit) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block commit")) + panic(errors.Wrap(err, "Error reading block commit")) } return commit } @@ -131,7 +132,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { } err := cdc.UnmarshalBinaryBare(bz, commit) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block seen commit")) + panic(errors.Wrap(err, "Error reading block seen commit")) } return commit } diff --git a/store/store_test.go b/store/store_test.go index 2d83aecc1..fd148f7b9 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -9,13 +9,13 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" db "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db" cfg "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" @@ -53,7 +53,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu stateDB := dbm.NewMemDB() state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + panic(errors.Wrap(err, "error constructing state from genesis file")) } return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } } @@ -84,6 +84,7 @@ func TestNewBlockStore(t *testing.T) { } for i, tt := range panicCausers { + tt := tt // Expecting a panic here on trying to parse an invalid blockStore _, _, panicErr := doFn(func() (interface{}, error) { db.Set(blockStoreKey, tt.data) @@ -253,6 +254,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } for i, tuple := range tuples { + tuple := tuple bs, db := freshBlockStore() // SaveBlock res, err, panicErr := doFn(func() (interface{}, error) { diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index b39277bd9..fb5458e82 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12 +FROM golang:1.13 # Add testing deps for curl RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list @@ -19,7 +19,7 @@ COPY . $REPO # Install the vendored dependencies # docker caching prevents reinstall on code change! 
-RUN make get_tools +RUN make tools # install ABCI CLI RUN make install_abci diff --git a/tests.mk b/tests.mk new file mode 100644 index 000000000..18caef496 --- /dev/null +++ b/tests.mk @@ -0,0 +1,106 @@ +#!/usr/bin/make -f + +######################################## +### Testing + +BINDIR ?= $(GOPATH)/bin + +## required to be run first by most tests +build_docker_test_image: + docker build -t tester -f ./test/docker/Dockerfile . + +### coverage, app, persistence, and libs tests +test_cover: + # run the go unit tests with coverage + bash test/test_cover.sh + +test_apps: + # run the app tests using bash + # requires `abci-cli` and `tendermint` binaries installed + bash test/app/test.sh + +test_abci_apps: + bash abci/tests/test_app/test.sh + +test_abci_cli: + # test the cli against the examples in the tutorial at: + # ./docs/abci-cli.md + # if test fails, update the docs ^ + @ bash abci/tests/test_cli/test.sh + +test_persistence: + # run the persistence tests using bash + # requires `abci-cli` installed + docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh + + # TODO undockerize + # bash test/persist/test_failure_indices.sh + +test_p2p: + docker rm -f rsyslog || true + rm -rf test/logs || true + mkdir test/logs + cd test/ + docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog + cd .. + # requires 'tester' the image from above + bash test/p2p/test.sh tester + # the `docker cp` takes a really long time; uncomment for debugging + # + # mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs + +test_integrations: + make build_docker_test_image + make tools + make install + make test_cover + make test_apps + make test_abci_apps + make test_abci_cli + make test_libs + make test_persistence + make test_p2p + +test_release: + @go test -tags release $(PACKAGES) + +test100: + @for i in {1..100}; do make test; done + +vagrant_test: + vagrant up + vagrant ssh -c 'make test_integrations' + +### go tests +test: + @echo "--> Running go test" + @go test -p 1 $(PACKAGES) + +test_race: + @echo "--> Running go test --race" + @go test -p 1 -v -race $(PACKAGES) + +# uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks +test_with_deadlock: + make set_with_deadlock + make test + make cleanup_after_test_with_deadlock + +set_with_deadlock: + @echo "Get Goid" + @go get github.com/petermattis/goid@b0b1615b78e5ee59739545bb38426383b2cda4c9 + @echo "Get Go-Deadlock" + @go get github.com/sasha-s/go-deadlock@d68e2bc52ae3291765881b9056f2c1527f245f1e + find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.RWMutex/deadlock.RWMutex/' + find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.Mutex/deadlock.Mutex/' + find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w + +# cleanes up after you ran test_with_deadlock +cleanup_after_test_with_deadlock: + find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.RWMutex/sync.RWMutex/' + find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.Mutex/sync.Mutex/' + find . 
-name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w + # cleans up the deps to not include the need libs + go mod tidy + +.PHONY: test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test diff --git a/tools/build/Makefile b/tools/build/Makefile index 8c33ffd5d..df1387068 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -64,7 +64,7 @@ build-tendermint: git-branch gopath-setup @echo "*** Building tendermint" go get -d -u github.com/tendermint/tendermint/cmd/tendermint cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint tools build cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin @echo "*** Built tendermint" @@ -87,7 +87,7 @@ build-basecoind: git-branch gopath-setup @echo "*** Building basecoind from cosmos-sdk" go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk tools build cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind @echo "*** Built basecoind from cosmos-sdk" diff --git a/tools/tm-bench/Dockerfile.dev b/tools/tm-bench/Dockerfile.dev index 1151965a2..73c263336 100644 --- a/tools/tm-bench/Dockerfile.dev +++ b/tools/tm-bench/Dockerfile.dev @@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/tendermint/tendermint/tools/tm-bench COPY Makefile /go/src/github.com/tendermint/tendermint/tools/tm-bench/ -RUN make get_tools +RUN make tools COPY . /go/src/github.com/tendermint/tendermint/tools/tm-bench diff --git a/tools/tm-monitor/Dockerfile.dev b/tools/tm-monitor/Dockerfile.dev index e593bf89c..347c7f0fb 100644 --- a/tools/tm-monitor/Dockerfile.dev +++ b/tools/tm-monitor/Dockerfile.dev @@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/tendermint/tools/tm-monitor COPY Makefile /go/src/github.com/tendermint/tools/tm-monitor/ -RUN make get_tools +RUN make tools COPY . /go/src/github.com/tendermint/tools/tm-monitor diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md index 2bd367b99..1a8dfffc7 100644 --- a/tools/tm-monitor/README.md +++ b/tools/tm-monitor/README.md @@ -86,6 +86,6 @@ websocket. ## Development ``` -make get_tools +make tools make test ``` diff --git a/tools/tm-monitor/monitor/network.go b/tools/tm-monitor/monitor/network.go index 4d85d7ed6..45cf2ac3c 100644 --- a/tools/tm-monitor/monitor/network.go +++ b/tools/tm-monitor/monitor/network.go @@ -85,7 +85,7 @@ func (n *Network) NewBlock(b tmtypes.Header) { } else { n.AvgBlockTime = 0.0 } - n.txThroughputMeter.Mark(int64(b.NumTxs)) + n.txThroughputMeter.Mark(b.NumTxs) n.AvgTxThroughput = n.txThroughputMeter.Rate1() } diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 7fefdfb42..216cf6851 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -49,7 +49,7 @@ var _ error = (*TestHarnessError)(nil) // with this version of Tendermint. 
type TestHarness struct { addr string - spv *privval.SignerValidatorEndpoint + signerClient *privval.SignerClient fpv *privval.FilePV chainID string acceptRetries int @@ -101,14 +101,19 @@ func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, err } logger.Info("Loaded genesis file", "chainID", st.ChainID) - spv, err := newTestHarnessSocketVal(logger, cfg) + spv, err := newTestHarnessListener(logger, cfg) + if err != nil { + return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") + } + + signerClient, err := privval.NewSignerClient(spv) if err != nil { return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } return &TestHarness{ addr: cfg.BindAddr, - spv: spv, + signerClient: signerClient, fpv: fpv, chainID: st.ChainID, acceptRetries: cfg.AcceptRetries, @@ -135,9 +140,11 @@ func (th *TestHarness) Run() { th.logger.Info("Starting test harness") accepted := false var startErr error + for acceptRetries := th.acceptRetries; acceptRetries > 0; acceptRetries-- { th.logger.Info("Attempting to accept incoming connection", "acceptRetries", acceptRetries) - if err := th.spv.Start(); err != nil { + + if err := th.signerClient.WaitForConnection(10 * time.Millisecond); err != nil { // if it wasn't a timeout error if _, ok := err.(timeoutError); !ok { th.logger.Error("Failed to start listener", "err", err) @@ -149,6 +156,7 @@ func (th *TestHarness) Run() { } startErr = err } else { + th.logger.Info("Accepted external connection") accepted = true break } @@ -182,8 +190,8 @@ func (th *TestHarness) Run() { func (th *TestHarness) TestPublicKey() error { th.logger.Info("TEST: Public key of remote signer") th.logger.Info("Local", "pubKey", th.fpv.GetPubKey()) - th.logger.Info("Remote", "pubKey", th.spv.GetPubKey()) - if th.fpv.GetPubKey() != th.spv.GetPubKey() { + th.logger.Info("Remote", "pubKey", th.signerClient.GetPubKey()) + if th.fpv.GetPubKey() != th.signerClient.GetPubKey() { th.logger.Error("FAILED: Local and remote public keys do not match") return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") } @@ -211,7 +219,7 @@ func (th *TestHarness) TestSignProposal() error { Timestamp: time.Now(), } propBytes := prop.SignBytes(th.chainID) - if err := th.spv.SignProposal(th.chainID, prop); err != nil { + if err := th.signerClient.SignProposal(th.chainID, prop); err != nil { th.logger.Error("FAILED: Signing of proposal", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } @@ -222,7 +230,7 @@ func (th *TestHarness) TestSignProposal() error { return newTestHarnessError(ErrTestSignProposalFailed, err, "") } // now validate the signature on the proposal - if th.spv.GetPubKey().VerifyBytes(propBytes, prop.Signature) { + if th.signerClient.GetPubKey().VerifyBytes(propBytes, prop.Signature) { th.logger.Info("Successfully validated proposal signature") } else { th.logger.Error("FAILED: Proposal signature validation failed") @@ -255,7 +263,7 @@ func (th *TestHarness) TestSignVote() error { } voteBytes := vote.SignBytes(th.chainID) // sign the vote - if err := th.spv.SignVote(th.chainID, vote); err != nil { + if err := th.signerClient.SignVote(th.chainID, vote); err != nil { th.logger.Error("FAILED: Signing of vote", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } @@ -266,7 +274,7 @@ func (th *TestHarness) TestSignVote() error { return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } // now validate the signature on the proposal - if 
th.spv.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { + if th.signerClient.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { th.logger.Info("Successfully validated vote signature", "type", voteType) } else { th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) @@ -301,10 +309,9 @@ func (th *TestHarness) Shutdown(err error) { }() } - if th.spv.IsRunning() { - if err := th.spv.Stop(); err != nil { - th.logger.Error("Failed to cleanly stop listener: %s", err.Error()) - } + err = th.signerClient.Close() + if err != nil { + th.logger.Error("Failed to cleanly stop listener: %s", err.Error()) } if th.exitWhenComplete { @@ -312,9 +319,8 @@ func (th *TestHarness) Shutdown(err error) { } } -// newTestHarnessSocketVal creates our client instance which we will use for -// testing. -func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerValidatorEndpoint, error) { +// newTestHarnessListener creates our client instance which we will use for testing. +func newTestHarnessListener(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerListenerEndpoint, error) { proto, addr := cmn.ProtocolAndAddress(cfg.BindAddr) if proto == "unix" { // make sure the socket doesn't exist - if so, try to delete it @@ -329,7 +335,7 @@ func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval if err != nil { return nil, err } - logger.Info("Listening at", "proto", proto, "addr", addr) + logger.Info("Listening", "proto", proto, "addr", addr) var svln net.Listener switch proto { case "unix": @@ -347,7 +353,7 @@ func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval logger.Error("Unsupported protocol (must be unix:// or tcp://)", "proto", proto) return nil, newTestHarnessError(ErrInvalidParameters, nil, fmt.Sprintf("Unsupported protocol: %s", proto)) } - return privval.NewSignerValidatorEndpoint(logger, svln), nil + return privval.NewSignerListenerEndpoint(logger, svln), nil } func newTestHarnessError(code int, err error, info string) *TestHarnessError { diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index c249bd2b6..47e510666 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -3,19 +3,18 @@ package internal import ( "fmt" "io/ioutil" - "net" "os" "testing" "time" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" ) const ( @@ -85,8 +84,8 @@ func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) { func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, false, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false) }, NoError, ) @@ -95,8 +94,8 @@ func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, 
th, ed25519.GenPrivKey(), false, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false) }, ErrTestPublicKeyFailed, ) @@ -105,8 +104,8 @@ func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { func TestRemoteSignerProposalSigningFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, true, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false) }, ErrTestSignProposalFailed, ) @@ -115,28 +114,30 @@ func TestRemoteSignerProposalSigningFailed(t *testing.T) { func TestRemoteSignerVoteSigningFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, false, true) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true) }, ErrTestSignVoteFailed, ) } -func newMockRemoteSigner(t *testing.T, th *TestHarness, privKey crypto.PrivKey, breakProposalSigning bool, breakVoteSigning bool) *privval.SignerServiceEndpoint { - return privval.NewSignerServiceEndpoint( +func newMockSignerServer(t *testing.T, th *TestHarness, privKey crypto.PrivKey, breakProposalSigning bool, breakVoteSigning bool) *privval.SignerServer { + mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning) + + dialerEndpoint := privval.NewSignerDialerEndpoint( th.logger, - th.chainID, - types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning), privval.DialTCPFn( th.addr, time.Duration(defaultConnDeadline)*time.Millisecond, ed25519.GenPrivKey(), ), ) + + return privval.NewSignerServer(dialerEndpoint, th.chainID, mockPV) } // For running relatively standard tests. -func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServiceEndpoint, expectedExitCode int) { +func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) { cfg := makeConfig(t, 100, 3) defer cleanup(cfg) @@ -148,10 +149,10 @@ func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServ th.Run() }() - rs := rsMaker(th) - require.NoError(t, rs.Start()) - assert.True(t, rs.IsRunning()) - defer rs.Stop() + ss := signerServerMaker(th) + require.NoError(t, ss.Start()) + assert.True(t, ss.IsRunning()) + defer ss.Stop() <-donec assert.Equal(t, expectedExitCode, th.exitCode) @@ -159,7 +160,7 @@ func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServ func makeConfig(t *testing.T, acceptDeadline, acceptRetries int) TestHarnessConfig { return TestHarnessConfig{ - BindAddr: testFreeTCPAddr(t), + BindAddr: privval.GetFreeLocalhostAddrPort(), KeyFile: makeTempFile("tm-testharness-keyfile", keyFileContents), StateFile: makeTempFile("tm-testharness-statefile", stateFileContents), GenesisFile: makeTempFile("tm-testharness-genesisfile", genesisFileContents), @@ -191,12 +192,3 @@ func makeTempFile(name, content string) string { } return tempFile.Name() } - -// testFreeTCPAddr claims a free port so we don't block on listener being ready. 
-func testFreeTCPAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/types/block.go b/types/block.go index 5dc0ff6a7..537aed2e7 100644 --- a/types/block.go +++ b/types/block.go @@ -748,7 +748,7 @@ func (sh SignedHeader) ValidateBasic(chainID string) error { // ValidateBasic on the Commit. err := sh.Commit.ValidateBasic() if err != nil { - return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + return errors.Wrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") } return nil } diff --git a/types/block_test.go b/types/block_test.go index ff7edd27a..1bf2a15ff 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -15,6 +15,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" ) @@ -83,6 +84,8 @@ func TestBlockValidateBasic(t *testing.T) { }, true}, } for i, tc := range testCases { + tc := tc + i := i t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) block.ProposerAddress = valSet.GetProposer().Address @@ -228,6 +231,7 @@ func TestCommitValidateBasic(t *testing.T) { {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true}, } for _, tc := range testCases { + tc := tc t.Run(tc.testName, func(t *testing.T) { com := randCommit() tc.malleateCommit(com) @@ -302,6 +306,7 @@ func TestBlockMaxDataBytes(t *testing.T) { } for i, tc := range testCases { + tc := tc if tc.panics { assert.Panics(t, func() { MaxDataBytes(tc.maxBytes, tc.valsCount, tc.evidenceCount) @@ -330,6 +335,7 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { } for i, tc := range testCases { + tc := tc if tc.panics { assert.Panics(t, func() { MaxDataBytesUnknownEvidence(tc.maxBytes, tc.valsCount) @@ -366,3 +372,150 @@ func TestCommitToVoteSet(t *testing.T) { assert.Equal(t, vote1bz, vote3bz) } } + +func TestCommitToVoteSetWithVotesForAnotherBlockOrNilBlock(t *testing.T) { + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) + blockID3 := makeBlockID([]byte("blockhash3"), 10000, []byte("partshash")) + + height := int64(3) + round := 1 + + type commitVoteTest struct { + blockIDs []BlockID + numVotes []int // must sum to numValidators + numValidators int + valid bool + } + + testCases := []commitVoteTest{ + {[]BlockID{blockID, blockID2, blockID3}, []int{8, 1, 1}, 10, true}, + {[]BlockID{blockID, blockID2, blockID3}, []int{67, 20, 13}, 100, true}, + {[]BlockID{blockID, blockID2, blockID3}, []int{1, 1, 1}, 3, false}, + {[]BlockID{blockID, blockID2, blockID3}, []int{3, 1, 1}, 5, false}, + {[]BlockID{blockID, {}}, []int{67, 33}, 100, true}, + {[]BlockID{blockID, blockID2, {}}, []int{10, 5, 5}, 20, false}, + } + + for _, tc := range testCases { + voteSet, valSet, vals := randVoteSet(height-1, 1, PrecommitType, tc.numValidators, 1) + + vi := 0 + for n := range tc.blockIDs { + for i := 0; i < tc.numVotes[n]; i++ { + addr := vals[vi].GetPubKey().Address() + vote := &Vote{ + ValidatorAddress: addr, + ValidatorIndex: vi, + Height: height - 1, + Round: round, + Type: PrecommitType, + BlockID: tc.blockIDs[n], + Timestamp: tmtime.Now(), + } + + _, err := signAddVote(vals[vi], vote, 
voteSet) + assert.NoError(t, err) + vi++ + } + } + if tc.valid { + commit := voteSet.MakeCommit() // panics without > 2/3 valid votes + assert.NotNil(t, commit) + err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, commit) + assert.Nil(t, err) + } else { + assert.Panics(t, func() { voteSet.MakeCommit() }) + } + } +} + +func TestSignedHeaderValidateBasic(t *testing.T) { + commit := randCommit() + chainID := "𠜎" + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + h := Header{ + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: chainID, + Height: commit.Height(), + Time: timestamp, + NumTxs: math.MaxInt64, + TotalTxs: math.MaxInt64, + LastBlockID: commit.BlockID, + LastCommitHash: commit.Hash(), + DataHash: commit.Hash(), + ValidatorsHash: commit.Hash(), + NextValidatorsHash: commit.Hash(), + ConsensusHash: commit.Hash(), + AppHash: commit.Hash(), + LastResultsHash: commit.Hash(), + EvidenceHash: commit.Hash(), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + } + + validSignedHeader := SignedHeader{Header: &h, Commit: commit} + validSignedHeader.Commit.BlockID.Hash = validSignedHeader.Hash() + invalidSignedHeader := SignedHeader{} + + testCases := []struct { + testName string + shHeader *Header + shCommit *Commit + expectErr bool + }{ + {"Valid Signed Header", validSignedHeader.Header, validSignedHeader.Commit, false}, + {"Invalid Signed Header", invalidSignedHeader.Header, validSignedHeader.Commit, true}, + {"Invalid Signed Header", validSignedHeader.Header, invalidSignedHeader.Commit, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + sh := SignedHeader{ + Header: tc.shHeader, + Commit: tc.shCommit, + } + assert.Equal(t, tc.expectErr, sh.ValidateBasic(validSignedHeader.Header.ChainID) != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBlockIDValidateBasic(t *testing.T) { + validBlockID := BlockID{ + Hash: cmn.HexBytes{}, + PartsHeader: PartSetHeader{ + Total: 1, + Hash: cmn.HexBytes{}, + }, + } + + invalidBlockID := BlockID{ + Hash: []byte{0}, + PartsHeader: PartSetHeader{ + Total: -1, + Hash: cmn.HexBytes{}, + }, + } + + testCases := []struct { + testName string + blockIDHash cmn.HexBytes + blockIDPartsHeader PartSetHeader + expectErr bool + }{ + {"Valid BlockID", validBlockID.Hash, validBlockID.PartsHeader, false}, + {"Invalid BlockID", invalidBlockID.Hash, validBlockID.PartsHeader, true}, + {"Invalid BlockID", validBlockID.Hash, invalidBlockID.PartsHeader, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + blockID := BlockID{ + Hash: tc.blockIDHash, + PartsHeader: tc.blockIDPartsHeader, + } + assert.Equal(t, tc.expectErr, blockID.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 45590217f..f7631b6d5 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -338,6 +338,7 @@ func BenchmarkEventBus(b *testing.B) { } for _, bm := range benchmarks { + bm := bm b.Run(bm.name, func(b *testing.B) { benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) }) diff --git a/types/evidence_test.go b/types/evidence_test.go index 1f1338cad..3c943e38f 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -146,6 +146,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { }, true}, } for _, tc := range testCases { + tc := tc t.Run(tc.testName, func(t 
*testing.T) {
 			ev := &DuplicateVoteEvidence{
 				PubKey: secp256k1.GenPrivKey().PubKey(),
@@ -157,3 +158,13 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) {
 		})
 	}
 }
+
+func TestMockGoodEvidenceValidateBasic(t *testing.T) {
+	goodEvidence := NewMockGoodEvidence(int64(1), 1, []byte{1})
+	assert.Nil(t, goodEvidence.ValidateBasic())
+}
+
+func TestMockBadEvidenceValidateBasic(t *testing.T) {
+	badEvidence := MockBadEvidence{MockGoodEvidence: NewMockGoodEvidence(int64(1), 1, []byte{1})}
+	assert.Nil(t, badEvidence.ValidateBasic())
+}
diff --git a/types/genesis.go b/types/genesis.go
index de59fc87e..94680bca8 100644
--- a/types/genesis.go
+++ b/types/genesis.go
@@ -7,6 +7,8 @@ import (
 	"io/ioutil"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"github.com/tendermint/tendermint/crypto"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	tmtime "github.com/tendermint/tendermint/types/time"
@@ -64,10 +66,10 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte {
 // and fills in defaults for optional fields left empty
 func (genDoc *GenesisDoc) ValidateAndComplete() error {
 	if genDoc.ChainID == "" {
-		return cmn.NewError("Genesis doc must include non-empty chain_id")
+		return errors.New("Genesis doc must include non-empty chain_id")
 	}
 	if len(genDoc.ChainID) > MaxChainIDLen {
-		return cmn.NewError("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
+		return errors.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
 	}
 
 	if genDoc.ConsensusParams == nil {
@@ -78,10 +80,10 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error {
 
 	for i, v := range genDoc.Validators {
 		if v.Power == 0 {
-			return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v)
+			return errors.Errorf("The genesis file cannot contain validators with no voting power: %v", v)
 		}
 		if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) {
-			return cmn.NewError("Incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
+			return errors.Errorf("Incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
 		}
 		if len(v.Address) == 0 {
 			genDoc.Validators[i].Address = v.PubKey.Address()
@@ -117,11 +119,11 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
 func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
 	jsonBlob, err := ioutil.ReadFile(genDocFile)
 	if err != nil {
-		return nil, cmn.ErrorWrap(err, "Couldn't read GenesisDoc file")
+		return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
 	}
 	genDoc, err := GenesisDocFromJSON(jsonBlob)
 	if err != nil {
-		return nil, cmn.ErrorWrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile))
+		return nil, errors.Wrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile))
 	}
 	return genDoc, nil
 }
diff --git a/types/params.go b/types/params.go
index 162aaeada..c9ab4aaf7 100644
--- a/types/params.go
+++ b/types/params.go
@@ -1,6 +1,8 @@
 package types
 
 import (
+	"github.com/pkg/errors"
+
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -95,38 +97,38 @@ func (params *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool {
 // allowed limits, and returns an error if they are not.
 func (params *ConsensusParams) Validate() error {
 	if params.Block.MaxBytes <= 0 {
-		return cmn.NewError("Block.MaxBytes must be greater than 0. Got %d",
+		return errors.Errorf("Block.MaxBytes must be greater than 0. Got %d",
 			params.Block.MaxBytes)
 	}
 	if params.Block.MaxBytes > MaxBlockSizeBytes {
-		return cmn.NewError("Block.MaxBytes is too big. %d > %d",
+		return errors.Errorf("Block.MaxBytes is too big. %d > %d",
 			params.Block.MaxBytes, MaxBlockSizeBytes)
 	}
 
 	if params.Block.MaxGas < -1 {
-		return cmn.NewError("Block.MaxGas must be greater or equal to -1. Got %d",
+		return errors.Errorf("Block.MaxGas must be greater or equal to -1. Got %d",
 			params.Block.MaxGas)
 	}
 
 	if params.Block.TimeIotaMs <= 0 {
-		return cmn.NewError("Block.TimeIotaMs must be greater than 0. Got %v",
+		return errors.Errorf("Block.TimeIotaMs must be greater than 0. Got %v",
 			params.Block.TimeIotaMs)
 	}
 
 	if params.Evidence.MaxAge <= 0 {
-		return cmn.NewError("EvidenceParams.MaxAge must be greater than 0. Got %d",
+		return errors.Errorf("EvidenceParams.MaxAge must be greater than 0. Got %d",
 			params.Evidence.MaxAge)
 	}
 
 	if len(params.Validator.PubKeyTypes) == 0 {
-		return cmn.NewError("len(Validator.PubKeyTypes) must be greater than 0")
+		return errors.New("len(Validator.PubKeyTypes) must be greater than 0")
 	}
 
 	// Check if keyType is a known ABCIPubKeyType
 	for i := 0; i < len(params.Validator.PubKeyTypes); i++ {
 		keyType := params.Validator.PubKeyTypes[i]
 		if _, ok := ABCIPubKeyTypesToAminoNames[keyType]; !ok {
-			return cmn.NewError("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type",
+			return errors.Errorf("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type",
 				i, keyType)
 		}
 	}
diff --git a/types/part_set_test.go b/types/part_set_test.go
index daa2fa5c5..37aacea75 100644
--- a/types/part_set_test.go
+++ b/types/part_set_test.go
@@ -95,6 +95,7 @@ func TestPartSetHeaderValidateBasic(t *testing.T) {
 		{"Invalid Hash", func(psHeader *PartSetHeader) { psHeader.Hash = make([]byte, 1) }, true},
 	}
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(tc.testName, func(t *testing.T) {
 			data := cmn.RandBytes(testPartSize * 100)
 			ps := NewPartSetFromData(data, testPartSize)
@@ -117,6 +118,7 @@ func TestPartValidateBasic(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(tc.testName, func(t *testing.T) {
 			data := cmn.RandBytes(testPartSize * 100)
 			ps := NewPartSetFromData(data, testPartSize)
diff --git a/types/priv_validator.go b/types/priv_validator.go
index 8acab243a..45d0a67b5 100644
--- a/types/priv_validator.go
+++ b/types/priv_validator.go
@@ -12,6 +12,7 @@ import (
 // PrivValidator defines the functionality of a local Tendermint validator
 // that signs votes and proposals, and never double signs.
 type PrivValidator interface {
+	// TODO: Extend the interface to return errors too. Issue: https://github.com/tendermint/tendermint/issues/3602
 	GetPubKey() crypto.PubKey
 
 	SignVote(chainID string, vote *Vote) error
diff --git a/types/proposal_test.go b/types/proposal_test.go
index f1c048e1d..3a1368072 100644
--- a/types/proposal_test.go
+++ b/types/proposal_test.go
@@ -127,6 +127,7 @@ func TestProposalValidateBasic(t *testing.T) {
 	blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash")))
 
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(tc.testName, func(t *testing.T) {
 			prop := NewProposal(
 				4, 2, 2,
diff --git a/types/results.go b/types/results.go
index d7d82d894..6b0c37562 100644
--- a/types/results.go
+++ b/types/results.go
@@ -41,7 +41,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
 	}
 }
 
-// Bytes serializes the ABCIResponse using wire
+// Bytes serializes the ABCIResponse using amino
 func (a ABCIResults) Bytes() []byte {
 	bz, err := cdc.MarshalBinaryLengthPrefixed(a)
 	if err != nil {
diff --git a/types/tx.go b/types/tx.go
index 0c6845a7d..b71c70029 100644
--- a/types/tx.go
+++ b/types/tx.go
@@ -133,6 +133,6 @@ type TxResult struct {
 // fieldNum is also 1 (see BinFieldNum in amino.MarshalBinaryBare).
 func ComputeAminoOverhead(tx Tx, fieldNum int) int64 {
 	fnum := uint64(fieldNum)
-	typ3AndFieldNum := (uint64(fnum) << 3) | uint64(amino.Typ3_ByteLength)
+	typ3AndFieldNum := (fnum << 3) | uint64(amino.Typ3_ByteLength)
 	return int64(amino.UvarintSize(typ3AndFieldNum)) + int64(amino.UvarintSize(uint64(len(tx))))
 }
diff --git a/types/validator_set.go b/types/validator_set.go
index 33636d092..8cb8a2ffd 100644
--- a/types/validator_set.go
+++ b/types/validator_set.go
@@ -2,15 +2,15 @@ package types
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"math"
 	"math/big"
 	"sort"
 	"strings"
 
+	"github.com/pkg/errors"
+
 	"github.com/tendermint/tendermint/crypto/merkle"
-	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // MaxTotalVotingPower - the maximum allowed total voting power.
@@ -681,13 +681,13 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 			continue
 		}
 		if precommit.Height != height {
-			return cmn.NewError("Blocks don't match - %d vs %d", round, precommit.Round)
+			return errors.Errorf("Blocks don't match - %d vs %d", round, precommit.Round)
 		}
 		if precommit.Round != round {
-			return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
+			return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
 		}
 		if precommit.Type != PrecommitType {
-			return cmn.NewError("Invalid commit -- not precommit @ index %v", idx)
+			return errors.Errorf("Invalid commit -- not precommit @ index %v", idx)
 		}
 		// See if this validator is in oldVals.
 		oldIdx, val := oldVals.GetByAddress(precommit.ValidatorAddress)
@@ -699,7 +699,7 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 		// Validate signature.
 		precommitSignBytes := commit.VoteSignBytes(chainID, idx)
 		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
-			return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
+			return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
 		}
 		// Good precommit!
 		if blockID.Equals(precommit.BlockID) {
@@ -721,15 +721,8 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 
 // ErrTooMuchChange
 func IsErrTooMuchChange(err error) bool {
-	switch err_ := err.(type) {
-	case cmn.Error:
-		_, ok := err_.Data().(errTooMuchChange)
-		return ok
-	case errTooMuchChange:
-		return true
-	default:
-		return false
-	}
+	_, ok := errors.Cause(err).(errTooMuchChange)
+	return ok
 }
 
 type errTooMuchChange struct {
diff --git a/types/vote.go b/types/vote.go
index 6fcbd3ff6..8f5eee7fb 100644
--- a/types/vote.go
+++ b/types/vote.go
@@ -12,7 +12,8 @@ import (
 
 const (
 	// MaxVoteBytes is a maximum vote size (including amino overhead).
-	MaxVoteBytes int64 = 223
+	MaxVoteBytes int64  = 223
+	nilVoteStr   string = "nil-Vote"
 )
 
 var (
@@ -84,7 +85,7 @@ func (vote *Vote) Copy() *Vote {
 
 func (vote *Vote) String() string {
 	if vote == nil {
-		return "nil-Vote"
+		return nilVoteStr
 	}
 	var typeString string
 	switch vote.Type {
diff --git a/types/vote_set.go b/types/vote_set.go
index a4a42bb4c..56dd9a13c 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -434,7 +434,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string {
 	voteStrings := make([]string, len(voteSet.votes))
 	for i, vote := range voteSet.votes {
 		if vote == nil {
-			voteStrings[i] = "nil-Vote"
+			voteStrings[i] = nilVoteStr
 		} else {
 			voteStrings[i] = vote.String()
 		}
@@ -499,7 +499,7 @@ func (voteSet *VoteSet) voteStrings() []string {
 	voteStrings := make([]string, len(voteSet.votes))
 	for i, vote := range voteSet.votes {
 		if vote == nil {
-			voteStrings[i] = "nil-Vote"
+			voteStrings[i] = nilVoteStr
 		} else {
 			voteStrings[i] = vote.String()
 		}
diff --git a/types/vote_test.go b/types/vote_test.go
index b6eb1f586..42a6bbd9f 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -285,6 +285,7 @@ func TestVoteValidateBasic(t *testing.T) {
 		{"Too big Signature", func(v *Vote) { v.Signature = make([]byte, MaxSignatureSize+1) }, true},
 	}
 	for _, tc := range testCases {
+		tc := tc
 		t.Run(tc.testName, func(t *testing.T) {
 			vote := examplePrecommit()
 			err := privVal.SignVote("test_chain_id", vote)
diff --git a/version/version.go b/version/version.go
index 9fb7c7869..e1c01c4c2 100644
--- a/version/version.go
+++ b/version/version.go
@@ -20,7 +20,7 @@ const (
 	// Must be a string because scripts like dist.sh read this file.
 	// XXX: Don't change the name of this variable or you will break
 	// automation :)
-	TMCoreSemVer = "0.32.2"
+	TMCoreSemVer = "0.32.3"
 
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.16.1"