Merge branch 'master' into wb/rfc-015

Branch: pull/8033/head
Author: William Banfield, committed by GitHub
Commit: 433ac6e282
203 changed files with 5831 additions and 4743 deletions
  1. +4 -1 .github/CODEOWNERS
  2. +3 -3 .github/workflows/build.yml
  3. +2 -2 .github/workflows/docker.yml
  4. +1 -1 .github/workflows/e2e-manual.yml
  5. +1 -1 .github/workflows/e2e-nightly-34x.yml
  6. +1 -1 .github/workflows/e2e-nightly-35x.yml
  7. +1 -1 .github/workflows/e2e-nightly-master.yml
  8. +1 -1 .github/workflows/e2e.yml
  9. +1 -1 .github/workflows/fuzz-nightly.yml
  10. +1 -1 .github/workflows/jepsen.yml
  11. +1 -1 .github/workflows/linkchecker.yml
  12. +6 -3 .github/workflows/lint.yml
  13. +1 -1 .github/workflows/linter.yml
  14. +18 -16 .github/workflows/markdown-links.yml
  15. +0 -24 .github/workflows/proto-check.yml
  16. +0 -64 .github/workflows/proto-dockerfile.yml
  17. +21 -0 .github/workflows/proto-lint.yml
  18. +1 -1 .github/workflows/release.yml
  19. +2 -2 .github/workflows/tests.yml
  20. +21 -0 CHANGELOG.md
  21. +1 -0 CHANGELOG_PENDING.md
  22. +25 -3 CONTRIBUTING.md
  23. +37 -24 Makefile
  24. +1 -0 README.md
  25. +0 -33 abci/client/creators.go
  26. +0 -85 abci/client/socket_client_test.go
  27. +20 -17 abci/cmd/abci-cli/abci-cli.go
  28. +4 -4 abci/example/example_test.go
  29. +14 -14 abci/example/kvstore/kvstore.go
  30. +9 -10 abci/example/kvstore/kvstore_test.go
  31. +1 -1 abci/tests/server/client.go
  32. +2 -2 abci/tests/test_cli/ex1.abci
  33. +5 -3 abci/tests/test_cli/ex1.abci.out
  34. +3 -3 abci/tests/test_cli/ex2.abci
  35. +5 -5 abci/tests/test_cli/ex2.abci.out
  36. +3 -3 abci/types/application.go
  37. +1 -1 abci/types/messages_test.go
  38. +10 -0 abci/types/result.go
  39. +1505 -1446 abci/types/types.pb.go
  40. +7 -12 buf.gen.yaml
  41. +3 -0 buf.work.yaml
  42. +1 -1 cmd/tendermint/commands/reindex_event.go
  43. +2 -2 cmd/tendermint/commands/reindex_event_test.go
  44. +76 -3 cmd/tendermint/commands/reset_priv_validator.go
  45. +1 -0 cmd/tendermint/main.go
  46. +16 -16 docs/app-dev/abci-cli.md
  47. +4 -1 docs/architecture/README.md
  48. +1 -1 docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
  49. +1 -1 docs/architecture/adr-045-abci-evidence.md
  50. +3 -2 docs/architecture/adr-075-rpc-subscription.md
  51. +23 -5 docs/architecture/adr-template.md
  52. +1 -1 docs/nodes/configuration.md
  53. +1 -1 docs/roadmap/roadmap.md
  54. +95 -0 docs/tendermint-core/consensus/proposer-based-timestamps.md
  55. +2 -2 docs/tutorials/go-built-in.md
  56. +2 -2 docs/tutorials/go.md
  57. +1 -0 docs/versions
  58. +8 -4 go.mod
  59. +4 -3 go.sum
  60. +1 -1 internal/blocksync/pool.go
  61. +25 -19 internal/blocksync/reactor.go
  62. +10 -5 internal/blocksync/reactor_test.go
  63. +165 -158 internal/consensus/byzantine_test.go
  64. +27 -12 internal/consensus/common_test.go
  65. +33 -10 internal/consensus/invalid_test.go
  66. +11 -11 internal/consensus/mempool_test.go
  67. +2 -6 internal/consensus/reactor.go
  68. +102 -25 internal/consensus/reactor_test.go
  69. +21 -20 internal/consensus/replay.go
  70. +26 -20 internal/consensus/replay_file.go
  71. +4 -15 internal/consensus/replay_stubs.go
  72. +46 -87 internal/consensus/replay_test.go
  73. +64 -31 internal/consensus/state.go
  74. +73 -0 internal/consensus/state_test.go
  75. +30 -23 internal/consensus/wal_generator.go
  76. +9 -10 internal/consensus/wal_test.go
  77. +0 -32 internal/eventbus/event_bus.go
  78. +2 -2 internal/eventbus/event_bus_test.go
  79. +33 -35 internal/evidence/pool.go
  80. +53 -48 internal/evidence/pool_test.go
  81. +5 -4 internal/evidence/reactor_test.go
  82. +34 -26 internal/evidence/verify_test.go
  83. +1 -1 internal/inspect/inspect_test.go
  84. +5 -5 internal/libs/autofile/autofile.go
  85. +13 -0 internal/libs/autofile/group.go
  86. +12 -0 internal/libs/flowrate/flowrate.go
  87. +1 -1 internal/libs/queue/queue_test.go
  88. +6 -8 internal/mempool/mempool.go
  89. +19 -19 internal/mempool/mempool_test.go
  90. +1 -1 internal/mempool/mock/mempool.go
  91. +3 -3 internal/mempool/reactor_test.go
  92. +1 -1 internal/mempool/types.go
  93. +2 -2 internal/p2p/conn/connection.go
  94. +8 -8 internal/p2p/conn/secret_connection_test.go
  95. +3 -4 internal/p2p/p2ptest/network.go
  96. +7 -3 internal/p2p/p2ptest/require.go
  97. +16 -9 internal/p2p/peermanager.go
  98. +37 -13 internal/p2p/peermanager_test.go
  99. +93 -134 internal/p2p/pex/reactor.go
  100. +9 -12 internal/p2p/pex/reactor_test.go

+4 -1 .github/CODEOWNERS

@ -7,4 +7,7 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir
# Spec related changes can be approved by the protocol design team
/spec @josef-widder @milosevic @cason

+3 -3 .github/workflows/build.yml

@ -23,7 +23,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -44,7 +44,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -66,7 +66,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


+2 -2 .github/workflows/docker.yml

@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@ -43,7 +43,7 @@ jobs:
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.13.0
uses: docker/login-action@v1.14.1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}


+1 -1 .github/workflows/e2e-manual.yml

@ -19,7 +19,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


+1 -1 .github/workflows/e2e-nightly-34x.yml

@ -24,7 +24,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'


+1 -1 .github/workflows/e2e-nightly-35x.yml

@ -24,7 +24,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
with:
ref: 'v0.35.x'


+1 -1 .github/workflows/e2e-nightly-master.yml

@ -23,7 +23,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


+1 -1 .github/workflows/e2e.yml

@ -17,7 +17,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


+1 -1 .github/workflows/fuzz-nightly.yml

@ -17,7 +17,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Install go-fuzz
working-directory: test/fuzz


+1 -1 .github/workflows/jepsen.yml

@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
with:
repository: 'tendermint/jepsen'


+1 -1 .github/workflows/linkchecker.yml

@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"

+6 -3 .github/workflows/lint.yml

@ -13,17 +13,20 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: actions/setup-go@v2
with:
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.2
- uses: golangci/golangci-lint-action@v3.1.0
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.42.1
version: v1.44
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF

+1 -1 .github/workflows/linter.yml

@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v4
env:


+18 -16 .github/workflows/markdown-links.yml

@ -1,17 +1,19 @@
name: Check Markdown links
# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
on:
push:
branches:
- master
pull_request:
branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
check-modified-files-only: 'yes'
#name: Check Markdown links
#
#on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
#
#jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
# with:
# check-modified-files-only: 'yes'

+0 -24 .github/workflows/proto-check.yml

@ -1,24 +0,0 @@
name: Proto Check
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a file in the proto directory
# has been modified.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "proto/*"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- name: check-breakage
run: make proto-check-breaking-ci

+0 -64 .github/workflows/proto-dockerfile.yml

@ -1,64 +0,0 @@
# This workflow (re)builds and pushes a Docker image containing the
# protobuf build tools used by the other workflows.
#
# When making changes that require updates to the builder image, you
# should merge the updates first and wait for this workflow to complete,
# so that the changes will be available for the dependent workflows.
#
name: Build & Push Proto Builder Image
on:
pull_request:
paths:
- "proto/*"
push:
branches:
- master
paths:
- "proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "0 9 1 * *"
env:
REGISTRY: ghcr.io
IMAGE_NAME: tendermint/docker-build-proto
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- name: Check out and assign tags
id: prep
run: |
DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
VERSION=noop
if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then
VERSION="${GITHUB_REF#refs/tags/}"
elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then
VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::"${TAGS}"
- name: Set up docker buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Log in to the container registry
uses: docker/login-action@v1.13.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v2.9.0
with:
context: ./proto
file: ./proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}

+21 -0 .github/workflows/proto-lint.yml

@ -0,0 +1,21 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- master
paths:
- 'proto/**'
jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.1.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

+1 -1 .github/workflows/release.yml

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
with:
fetch-depth: 0


+2 -2 .github/workflows/tests.yml

@ -19,7 +19,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -41,7 +41,7 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


+21 -0 CHANGELOG.md

@ -2,6 +2,27 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.2
February 28, 2022
Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
### IMPROVEMENTS
- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
### BUG FIXES
- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
## v0.35.1
January 26, 2022


+1 -0 CHANGELOG_PENDING.md

@ -19,6 +19,7 @@ Special thanks to external contributors on this release:
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
- [cli] \#8081 make the reset command safe to use. (@marbar3778)
- Apps


+25 -3 CONTRIBUTING.md

@ -105,11 +105,33 @@ specify exactly the dependency you want to update, eg.
## Protobuf
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
across Tendermint Core.
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
To generate proto stubs, lint, and check protos for breaking changes, you will
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
of the repository, run:
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
```bash
# Lint all of the .proto files in proto/tendermint
make proto-lint
# Check if any of your local changes (prior to committing to the Git repository)
# are breaking
make proto-check-breaking
# Generate Go code from the .proto files in proto/tendermint
make proto-gen
```
To automatically format `.proto` files, you will need
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
installed, you can run:
```bash
make proto-format
```
### Visual Studio Code


+37 -24 Makefile

@ -13,8 +13,6 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
CGO_ENABLED ?= 0
# handle nostrip
@ -73,41 +71,57 @@ install:
$(BUILDDIR)/:
mkdir -p $@
# The Docker image containing the generator, formatter, and linter.
# This is generated by proto/Dockerfile. To update tools, make changes
# there and run the Build & Push Proto Builder Image workflow.
IMAGE := ghcr.io/tendermint/docker-build-proto:latest
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
HTTPS_GIT := https://github.com/tendermint/tendermint.git
###############################################################################
### Protobuf ###
###############################################################################
proto-all: proto-lint proto-check-breaking
.PHONY: proto-all
check-proto-deps:
ifeq (,$(shell which buf))
$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
endif
ifeq (,$(shell which protoc-gen-gogofaster))
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
endif
.PHONY: check-proto-deps
proto-gen:
check-proto-format-deps:
ifeq (,$(shell which clang-format))
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps
proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
@buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen
proto-lint:
@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@buf lint
.PHONY: proto-lint
proto-format:
proto-format: check-proto-format-deps
@echo "Formatting Protobuf files"
@$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking:
@$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml
proto-check-breaking: check-proto-deps
@echo "Checking for breaking changes in Protobuf files against local branch"
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@buf breaking --against ".git"
.PHONY: proto-check-breaking
proto-check-breaking-ci:
@$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml
.PHONY: proto-check-breaking-ci
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
###############################################################################
### Build ABCI ###
@ -222,9 +236,7 @@ build-docs:
mkdir -p ~/output/$${path_prefix} ; \
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
done < versions ; \
mkdir -p ~/output/master ; \
cp -r .vuepress/dist/* ~/output/master/
done < versions ;
.PHONY: build-docs
###############################################################################
@ -331,3 +343,4 @@ split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out

+1 -0 README.md

@ -127,6 +127,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
- [Terra](https://www.terra.money/)
- [Celestia](https://celestia.org/)
- [Anoma](https://anoma.network/)
- [Vocdoni](https://docs.vocdoni.io/)
### Research


+0 -33 abci/client/creators.go

@ -1,33 +0,0 @@
package abciclient
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
// Creator creates new ABCI clients.
type Creator func(log.Logger) (Client, error)
// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
return func(logger log.Logger) (Client, error) {
return NewLocalClient(logger, app), nil
}
}
// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
return func(log.Logger) (Client, error) {
remoteApp, err := NewClient(logger, addr, transport, mustConnect)
if err != nil {
return nil, fmt.Errorf("failed to connect to proxy: %w", err)
}
return remoteApp, nil
}
}

+0 -85 abci/client/socket_client_test.go

@ -1,85 +0,0 @@
package abciclient_test
import (
"context"
"fmt"
"testing"
"time"
"math/rand"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
func TestProperSyncCalls(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := slowApp{}
logger := log.NewNopLogger()
_, c := setupClientServer(ctx, t, logger, app)
resp := make(chan error, 1)
go func() {
rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
assert.NoError(t, c.Flush(ctx))
assert.NotNil(t, rsp)
select {
case <-ctx.Done():
case resp <- c.Error():
}
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.NoError(t, err, "This should return success")
}
}
func setupClientServer(
ctx context.Context,
t *testing.T,
logger log.Logger,
app types.Application,
) (service.Service, abciclient.Client) {
t.Helper()
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(logger, addr, "socket", app)
require.NoError(t, err)
require.NoError(t, s.Start(ctx))
t.Cleanup(s.Wait)
c := abciclient.NewSocketClient(logger, addr, true)
require.NoError(t, c.Start(ctx))
t.Cleanup(c.Wait)
require.True(t, s.IsRunning())
require.True(t, c.IsRunning())
return s, c
}
type slowApp struct {
types.BaseApplication
}
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseFinalizeBlock{}
}

+20 -17 abci/cmd/abci-cli/abci-cli.go

@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
cmd.AddCommand(consoleCmd)
cmd.AddCommand(echoCmd)
cmd.AddCommand(infoCmd)
cmd.AddCommand(deliverTxCmd)
cmd.AddCommand(finalizeBlockCmd)
cmd.AddCommand(checkTxCmd)
cmd.AddCommand(commitCmd)
cmd.AddCommand(versionCmd)
@ -150,10 +150,9 @@ where example.file looks something like:
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01 0x04 0xff
info
`,
Args: cobra.ExactArgs(0),
@ -169,7 +168,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
RunE: cmdConsole,
}
@ -188,11 +187,11 @@ var infoCmd = &cobra.Command{
RunE: cmdInfo,
}
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
var finalizeBlockCmd = &cobra.Command{
Use: "finalize_block",
Short: "deliver a block of transactions to the application",
Long: "deliver a block of transactions to the application",
Args: cobra.MinimumNArgs(1),
RunE: cmdFinalizeBlock,
}
@ -426,7 +425,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdCheckTx(cmd, actualArgs)
case "commit":
return cmdCommit(cmd, actualArgs)
case "deliver_tx":
case "finalize_block":
return cmdFinalizeBlock(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
@ -500,19 +499,23 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want the tx",
Log: "Must provide at least one transaction",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
txs := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txs[i] = txBytes
}
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
if err != nil {
return err
}
for _, tx := range res.Txs {
for _, tx := range res.TxResults {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,


+4 -4 abci/example/example_test.go

@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
// Send bulk request
res, err := client.FinalizeBlock(ctx, rfb)
require.NoError(t, err)
require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
for _, tx := range res.Txs {
require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
for _, tx := range res.TxResults {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
// Send request
response, err := client.FinalizeBlock(ctx, &rfb)
require.NoError(t, err, "Error in GRPC FinalizeBlock")
require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.Txs {
require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.TxResults {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
}

+14 -14 abci/example/kvstore/kvstore.go

@ -117,7 +117,7 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(tx) {
@ -156,7 +156,7 @@ func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx {
},
}
return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
}
func (app *Application) Close() error {
@ -190,12 +190,12 @@ func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.Resp
}
}
respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
respTxs := make([]*types.ExecTxResult, len(req.Txs))
for i, tx := range req.Txs {
respTxs[i] = app.handleTx(tx)
}
return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
}
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
@ -338,13 +338,13 @@ func isValidatorTx(tx []byte) bool {
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
@ -353,7 +353,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
@ -361,7 +361,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
@ -371,7 +371,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
}
// add, update, or remove a validator
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
@ -386,7 +386,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
}
if !hasKey {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
@ -398,7 +398,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
@ -411,7 +411,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
return &types.ExecTxResult{Code: code.CodeTypeOK}
}
// -----------------------------
@ -425,9 +425,9 @@ func isPrepareTx(tx []byte) bool {
// execPrepareTx is noop. tx data is considered as placeholder
// and is substitute at the PrepareProposal.
func (app *Application) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
// noop
return &types.ResponseDeliverTx{}
return &types.ExecTxResult{}
}
// substPrepareTx subst all the preparetx in the blockdata


+9 -10 abci/example/kvstore/kvstore_test.go

@ -27,12 +27,12 @@ const (
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating tx doesn't raise error
ar = app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// commit
app.Commit()
@ -107,7 +107,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@ -196,7 +196,6 @@ func makeApplyBlock(
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Height: height,
Txs: txs,
})
@ -326,13 +325,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating FinalizeBlock doesn't raise error
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// commit
_, err = app.Commit(ctx)
require.NoError(t, err)


+1 -1 abci/tests/server/client.go

@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
for i, tx := range res.Txs {
for i, tx := range res.TxResults {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp[i] {
fmt.Println("Failed test: FinalizeBlock")


+2 -2 abci/tests/test_cli/ex1.abci

@ -1,10 +1,10 @@
echo hello
info
commit
deliver_tx "abc"
finalize_block "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
finalize_block "def=xyz" "ghi=123"
commit
query "def"

+5 -3 abci/tests/test_cli/ex1.abci.out

@ -12,7 +12,7 @@
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
> finalize_block "abc"
-> code: OK
> info
@ -33,12 +33,14 @@
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> commit
-> code: OK
-> data.hex: 0x0400000000000000
-> data.hex: 0x0600000000000000
> query "def"
-> code: OK


+3 -3 abci/tests/test_cli/ex2.abci

@ -1,7 +1,7 @@
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01
finalize_block 0x04
info

+5 -5 abci/tests/test_cli/ex2.abci.out

@ -4,20 +4,20 @@
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
> finalize_block 0x00
-> code: OK
> check_tx 0x00
-> code: OK
> deliver_tx 0x01
> finalize_block 0x01
-> code: OK
> deliver_tx 0x04
> finalize_block 0x04
-> code: OK
> info
-> code: OK
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
-> data: {"size":3}
-> data.hex: 0x7B2273697A65223A337D

+3 -3 abci/types/application.go

@ -103,12 +103,12 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
txs := make([]*ResponseDeliverTx, len(req.Txs))
txs := make([]*ExecTxResult, len(req.Txs))
for i := range req.Txs {
txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
txs[i] = &ExecTxResult{Code: CodeTypeOK}
}
return ResponseFinalizeBlock{
Txs: txs,
TxResults: txs,
}
}


+1 -1 abci/types/messages_test.go

@ -13,7 +13,7 @@ import (
)
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
b, err := json.Marshal(&ExecTxResult{Code: 1})
assert.NoError(t, err)
// include empty fields.
assert.True(t, strings.Contains(string(b), "code"))


+10 -0 abci/types/result.go

@ -33,6 +33,16 @@ func (r ResponseDeliverTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
return r.Code == CodeTypeOK


+1505 -1446 abci/types/types.pb.go (file diff suppressed because it is too large)


+7 -12 buf.gen.yaml

@ -1,14 +1,9 @@
# The version of the generation template (required).
# The only currently-valid value is v1beta1.
version: v1beta1
# The plugins to run.
version: v1
plugins:
# The name of the plugin.
- name: gogofaster
# The directory where the generated proto output will be written.
# The directory is relative to where the generation tool was run.
out: proto
# Set options to assign import paths to the well-known types
# and to enable service generation.
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
out: ./proto/
opt:
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
- plugins=grpc
- paths=source_relative

+3 -0 buf.work.yaml

@ -0,0 +1,3 @@
version: v1
directories:
- proto

+1 -1 cmd/tendermint/commands/reindex_event.go

@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.FinalizeBlock.Txs[i]),
Result: *(r.FinalizeBlock.TxResults[i]),
}
_ = batch.Add(&tr)


+2 -2 cmd/tendermint/commands/reindex_event_test.go

@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) {
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
dtx := abcitypes.ExecTxResult{}
abciResp := &prototmstate.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{&dtx},
TxResults: []*abcitypes.ExecTxResult{&dtx},
},
}


+76 -3 cmd/tendermint/commands/reset_priv_validator.go

@ -2,6 +2,7 @@ package commands
import (
"os"
"path/filepath"
"github.com/spf13/cobra"
@ -31,6 +32,20 @@ func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command
return cmd
}
// MakeResetStateCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
return &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
return resetState(conf.DBDir(), logger, keyType)
},
}
}
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
@ -55,18 +70,76 @@ func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *c
// it's only suitable for testnets.
// resetAll removes address book files plus all data, and resets the privValdiator data.
// Exported so other CLI tools can use it.
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
// recreate the dbDir since the privVal state needs to live there
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}
// resetState removes address book files plus all databases.
func resetState(dbDir string, logger log.Logger, keyType string) error {
blockdb := filepath.Join(dbDir, "blockstore.db")
state := filepath.Join(dbDir, "state.db")
wal := filepath.Join(dbDir, "cs.wal")
evidence := filepath.Join(dbDir, "evidence.db")
txIndex := filepath.Join(dbDir, "tx_index.db")
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(blockdb) {
if err := os.RemoveAll(blockdb); err == nil {
logger.Info("Removed all blockstore.db", "dir", blockdb)
} else {
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
}
}
if tmos.FileExists(state) {
if err := os.RemoveAll(state); err == nil {
logger.Info("Removed all state.db", "dir", state)
} else {
logger.Error("error removing all state.db", "dir", state, "err", err)
}
}
if tmos.FileExists(wal) {
if err := os.RemoveAll(wal); err == nil {
logger.Info("Removed all cs.wal", "dir", wal)
} else {
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
}
}
if tmos.FileExists(evidence) {
if err := os.RemoveAll(evidence); err == nil {
logger.Info("Removed all evidence.db", "dir", evidence)
} else {
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
}
}
if tmos.FileExists(txIndex) {
if err := os.RemoveAll(txIndex); err == nil {
logger.Info("Removed tx_index.db", "dir", txIndex)
} else {
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
}
}
if tmos.FileExists(peerstore) {
if err := os.RemoveAll(peerstore); err == nil {
logger.Info("Removed peerstore.db", "dir", peerstore)
} else {
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
}
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
return nil
}
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {


+1 -0 cmd/tendermint/main.go

@ -34,6 +34,7 @@ func main() {
commands.MakeReplayCommand(conf, logger),
commands.MakeReplayConsoleCommand(conf, logger),
commands.MakeResetAllCommand(conf, logger),
commands.MakeResetStateCommand(conf, logger),
commands.MakeResetPrivateValidatorCommand(conf, logger),
commands.MakeShowValidatorCommand(conf, logger),
commands.MakeTestnetFilesCommand(conf, logger),


+16 -16 docs/app-dev/abci-cli.md

@ -27,17 +27,17 @@ Usage:
abci-cli [command]
Available Commands:
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
finalize_block Send a set of transactions to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
Flags:
--abci string socket or grpc (default "socket")
@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command.
The `abci-cli` tool lets us send ABCI messages to our application, to
help build and debug them.
The most important messages are `deliver_tx`, `check_tx`, and `commit`,
The most important messages are `finalize_block`, `check_tx`, and `commit`,
but there are others for convenience, configuration, and information
purposes.
@ -173,7 +173,7 @@ Try running these commands:
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
> finalize_block "abc"
-> code: OK
> info
@ -192,7 +192,7 @@ Try running these commands:
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
> finalize_block "def=xyz"
-> code: OK
> commit
@ -207,8 +207,8 @@ Try running these commands:
-> value.hex: 78797A
```
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if
we do `finalize_block "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.


+4 -1 docs/architecture/README.md

@ -86,13 +86,16 @@ Note the context/background should be written in the present tense.
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
### Deprecated
None
### Rejected
- [ADR-023: ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md)
- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
- [ADR-058: Event-Hashing](./adr-058-event-hashing.md)
### Proposed
- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md)


+1 -1 docs/architecture/adr-044-lite-client-with-weak-subjectivity.md

@ -84,7 +84,7 @@ The linear verification algorithm requires downloading all headers
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
full header for the provided `TrustHeight` and then proceeds to download `N+1`
headers and applies the [Tendermint validation
rules](https://docs.tendermint.com/master/spec/light-client/verification/)
rules](https://github.com/tendermint/tendermint/tree/master/spec/light-client/verification/README.md)
to each block.
### Bisecting Verification


+1 -1 docs/architecture/adr-045-abci-evidence.md

@ -18,7 +18,7 @@ graceful here, but that's for another day.
It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
[fork accountability](https://github.com/tendermint/tendermint/blob/master/spec/light-client/accountability/README.md)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish


+3 -2 docs/architecture/adr-075-rpc-subscription.md

@ -2,6 +2,7 @@
## Changelog
- 01-Mar-2022: Update long-polling interface (@creachadair).
- 10-Feb-2022: Updates to reflect implementation.
- 26-Jan-2022: Marked accepted.
- 22-Jan-2022: Updated and expanded (@creachadair).
@ -347,8 +348,8 @@ limit.
The `wait_time` parameter is used to effect polling. If `before` is empty and
no items are available, the server will wait for up to `wait_time` for matching
items to arrive at the head of the log. If `wait_time` is zero, the server will
return whatever eligible items are available immediately.
items to arrive at the head of the log. If `wait_time` is zero or negative, the
server will wait for a default (positive) interval.
If `before` non-empty, `wait_time` is ignored: new results are only added to
the head of the log, so there is no need to wait. This allows the client to


+23 -5 docs/architecture/adr-template.md

@ -6,12 +6,30 @@
## Status
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted"
> once it is agreed upon. Once the ADR has been implemented mark the ADR as
> "implemented". If a later ADR changes or reverses a decision, it may be marked
> as "deprecated" or "superseded" with a reference to its replacement.
> An architecture decision is considered "proposed" when a PR containing the ADR
> is submitted. When merged, an ADR must have a status associated with it, which
> must be one of: "Accepted", "Rejected", "Deprecated" or "Superseded".
>
> An accepted ADR's implementation status must be tracked via a tracking issue,
> milestone or project board (only one of these is necessary). For example:
>
> Accepted
>
> [Tracking issue](https://github.com/tendermint/tendermint/issues/123)
> [Milestone](https://github.com/tendermint/tendermint/milestones/123)
> [Project board](https://github.com/orgs/tendermint/projects/123)
>
> Rejected ADRs are captured as a record of recommendations that we specifically
> do not (and possibly never) want to implement. The ADR itself must, for
> posterity, include reasoning as to why it was rejected.
>
> If an ADR is deprecated, simply write "Deprecated" in this section. If an ADR
> is superseded by one or more other ADRs, provide local a reference to those
> ADRs, e.g.:
>
> Superseded by [ADR 123](./adr-123.md)
{Deprecated|Declined|Accepted|Implemented}
Accepted | Rejected | Deprecated | Superseded by
## Context


+1 -1 docs/nodes/configuration.md

@ -594,7 +594,7 @@ This section will cover settings within the p2p section of the `config.toml`.
- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config paramters being deprecated and/or replaced.
Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config parameters being deprecated and/or replaced.
We will cover the new and deprecated parameters below.
### New Parameters


+1 -1 docs/roadmap/roadmap.md

@ -47,7 +47,7 @@ An overhaul of the existing interface between the application and consensus, to
### Proposer-Based Timestamps
Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
Proposer-based timestamps are a replacement of [BFT time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
### RPC Event Subscription


+95 -0 docs/tendermint-core/consensus/proposer-based-timestamps.md

@ -0,0 +1,95 @@
---
order: 3
---
# PBTS
This document provides an overview of the Proposer-Based Timestamp (PBTS)
algorithm added to Tendermint in the v0.36 release. It outlines the core
functionality as well as the parameters and constraints of this algorithm.
## Algorithm Overview
The PBTS algorithm defines a way for a Tendermint blockchain to create block
timestamps that are within a reasonable bound of the clocks of the validators on
the network. This replaces the original BFTTime algorithm for timestamp
assignment that relied on the timestamps included in precommit messages.
## Algorithm Parameters
The functionality of the PBTS algorithm is governed by two parameters within
Tendermint. These two parameters are [consensus
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
meaning they are configured by the ABCI application and are expected to be the
same across all nodes on the network.
### `Precision`
The `Precision` parameter configures the acceptable upper-bound of clock drift
among all of the nodes on a Tendermint network. Any two nodes on a Tendermint
network are expected to have clocks that differ by at most `Precision`
milliseconds at any given instant.
### `MessageDelay`
The `MessageDelay` parameter configures the acceptable upper-bound for
transmitting a `Proposal` message from the proposer to _all_ of the validators
on the network.
Networks should choose as small a value for `MessageDelay` as is practical,
provided it is large enough that messages can reach all participants with high
probability given the number of participants and latency of their connections.
## Algorithm Concepts
### Block timestamps
Each block produced by the Tendermint consensus engine contains a timestamp.
The timestamp produced in each block is a meaningful representation of time that is
useful for the protocols and applications built on top of Tendermint.
The following protocols and application features require a reliable source of time:
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet
delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)
### Proposer Selects a Block Timestamp
When the proposer node creates a new block proposal, the node reads the time
from its local clock and uses this reading as the timestamp for the proposed
block.
### Timeliness
When each validator on a Tendermint network receives a proposed block, it
performs a series of checks to ensure that the block can be considered valid as
a candidate to be the next block in the chain.
The PBTS algorithm performs a validity check on the timestamp of proposed
blocks. When a validator receives a proposal it ensures that the timestamp in
the proposal is within a bound of the validator's local clock. Specifically, the
algorithm checks that the timestamp is no more than `Precision` greater than the
node's local clock and no less than `Precision` + `MessageDelay` behind the
node's local clock. This creates a range of acceptable timestamps around the
node's local time. If the timestamp is within this range, the PBTS algorithm
considers the block **timely**. If a block is not **timely**, the node will
issue a `nil` `prevote` for this block, signaling to the rest of the network
that the node does not consider the block to be valid.
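The acceptable range can be restated as a simple predicate: a timestamp `ts` is timely at local time `now` when `now - (Precision + MessageDelay) <= ts <= now + Precision`. The Go sketch below expresses that check; the `isTimely` helper is illustrative only, and the authoritative definition lives in the PBTS specification linked below.
```go
package main

import (
	"fmt"
	"time"
)

// isTimely restates the timeliness check described above: a proposal
// timestamp is accepted when it lies within
// [localTime - (precision + messageDelay), localTime + precision].
// This is an illustrative sketch, not the production implementation.
func isTimely(proposalTime, localTime time.Time, precision, messageDelay time.Duration) bool {
	lowerBound := localTime.Add(-(precision + messageDelay))
	upperBound := localTime.Add(precision)
	return !proposalTime.Before(lowerBound) && !proposalTime.After(upperBound)
}

func main() {
	now := time.Now()
	precision := 500 * time.Millisecond
	messageDelay := 2 * time.Second

	// A proposal stamped slightly in the past falls inside the window...
	fmt.Println(isTimely(now.Add(-time.Second), now, precision, messageDelay)) // true
	// ...while one stamped a minute in the future does not.
	fmt.Println(isTimely(now.Add(time.Minute), now, precision, messageDelay)) // false
}
```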
### Clock Synchronization
The PBTS algorithm requires that the clocks of the validators on a Tendermint
network be within `Precision` of each other. In practice, this means that
validators should periodically synchronize to a reliable NTP server. Validators
that drift too far away from the rest of the network will no longer propose
blocks with valid timestamps. Additionally, they will not consider the
timestamps of blocks proposed by their peers to be valid either.
## See Also
* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md)
contains all of the details of the algorithm.

+ 2
- 2
docs/tutorials/go-built-in.md View File

@ -212,7 +212,7 @@ etc.) by Tendermint Core.
Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas).
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).
For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
@ -331,7 +331,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery
```
The complete specification can be found
[here](https://docs.tendermint.com/master/spec/abci/).
[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/).
## 1.4 Starting an application and a Tendermint Core instance in the same process


+ 2
- 2
docs/tutorials/go.md View File

@ -210,7 +210,7 @@ etc.) by Tendermint Core.
Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas).
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).
For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
@ -328,7 +328,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery
```
The complete specification can be found
[here](https://docs.tendermint.com/master/spec/abci/).
[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/).
## 1.4 Starting an application and a Tendermint Core instances


+ 1
- 0
docs/versions View File

@ -1,3 +1,4 @@
master master
v0.33.x v0.33
v0.34.x v0.34
v0.35.x v0.35

+ 8
- 4
go.mod View File

@ -26,7 +26,7 @@ require (
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.6
@ -34,11 +34,17 @@ require (
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.44.0
google.golang.org/grpc v1.45.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
)
require (
github.com/creachadair/atomicfile v0.2.4
github.com/google/go-cmp v0.5.7
gotest.tools v2.2.0+incompatible
)
require (
4d63.com/gochecknoglobals v0.1.0 // indirect
github.com/Antonboom/errname v0.1.5 // indirect
@ -67,7 +73,6 @@ require (
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
github.com/containerd/continuity v0.2.1 // indirect
github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48 // indirect
github.com/creachadair/atomicfile v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingajkin/go-header v0.4.2 // indirect
github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
@ -107,7 +112,6 @@ require (
github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect


+ 4
- 3
go.sum View File

@ -940,8 +940,9 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@ -1625,8 +1626,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


+ 1
- 1
internal/blocksync/pool.go View File

@ -168,7 +168,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
for _, peer := range pool.peers {
// check if peer timed out
if !peer.didTimeout && peer.numPending > 0 {
curRate := peer.recvMonitor.Status().CurRate
curRate := peer.recvMonitor.CurrentTransferRate()
// curRate can be 0 on start
if curRate != 0 && curRate < minRecvRate {
err := errors.New("peer is not sending us data fast enough")


+ 25
- 19
internal/blocksync/reactor.go View File

@ -70,6 +70,8 @@ type Reactor struct {
// immutable
initialState sm.State
// store
stateStore sm.Store
blockExec *sm.BlockExecutor
store *store.BlockStore
@ -101,7 +103,7 @@ type Reactor struct {
func NewReactor(
ctx context.Context,
logger log.Logger,
state sm.State,
stateStore sm.Store,
blockExec *sm.BlockExecutor,
store *store.BlockStore,
consReactor consensusReactor,
@ -111,19 +113,6 @@ func NewReactor(
metrics *consensus.Metrics,
eventBus *eventbus.EventBus,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
}
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor())
if err != nil {
return nil, err
@ -131,20 +120,16 @@ func NewReactor(
r := &Reactor{
logger: logger,
initialState: state,
stateStore: stateStore,
blockExec: blockExec,
store: store,
pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: newAtomicBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockSyncCh: blockSyncCh,
blockSyncOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
metrics: metrics,
eventBus: eventBus,
syncStartTime: time.Time{},
}
r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
@ -159,6 +144,27 @@ func NewReactor(
// If blockSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart(ctx context.Context) error {
state, err := r.stateStore.Load()
if err != nil {
return err
}
r.initialState = state
if state.LastBlockHeight != r.store.Height() {
return fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, r.store.Height())
}
startHeight := r.store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh)
r.requestsCh = requestsCh
r.errorsCh = errorsCh
if r.blockSync.IsSet() {
if err := r.pool.Start(ctx); err != nil {
return err


+ 10
- 5
internal/blocksync/reactor_test.go View File

@ -14,6 +14,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
@ -33,7 +34,7 @@ type reactorTestSuite struct {
nodes []types.NodeID
reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
app map[types.NodeID]abciclient.Client
blockSyncChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
@ -64,7 +65,7 @@ func setup(
network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
app: make(map[types.NodeID]abciclient.Client, numNodes),
blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
@ -109,7 +110,7 @@ func (rts *reactorTestSuite) addNode(
logger := log.TestingLogger()
rts.nodes = append(rts.nodes, nodeID)
rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics())
rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics())
require.NoError(t, rts.app[nodeID].Start(ctx))
blockDB := dbm.NewMemDB()
@ -121,13 +122,17 @@ func (rts *reactorTestSuite) addNode(
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
eventbus := eventbus.NewDefault(logger)
require.NoError(t, eventbus.Start(ctx))
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
rts.app[nodeID].Consensus(),
rts.app[nodeID],
mock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
eventbus,
)
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@ -176,7 +181,7 @@ func (rts *reactorTestSuite) addNode(
rts.reactors[nodeID], err = NewReactor(
ctx,
rts.logger.With("nodeID", nodeID),
state.Copy(),
stateStore,
blockExec,
blockStore,
nil,


+ 165
- 158
internal/consensus/byzantine_test.go View File

@ -82,37 +82,33 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
log.TestingLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
// Make a full instance of the evidence pool
evidenceDB := dbm.NewMemDB()
evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
require.NoError(t, err)
// set private validator
pv := privVals[i]
cs.SetPrivValidator(ctx, pv)
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
err = eventBus.Start(ctx)
require.NoError(t, err)
cs.SetEventBus(eventBus)
evpool.SetEventBus(eventBus)
cs.SetTimeoutTicker(tickerFunc())
states[i] = cs
}()
}
rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock
rts := setup(ctx, t, nValidators, states, 512) // buffer must be large enough to not deadlock
var bzNodeID types.NodeID
@ -238,8 +234,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}
for _, reactor := range rts.reactors {
state := reactor.state.GetState()
reactor.SwitchToConsensus(ctx, state, false)
reactor.SwitchToConsensus(ctx, reactor.state.GetState(), false)
}
// Evidence should be submitted and committed at the third height but
@ -248,20 +243,26 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
var wg sync.WaitGroup
i := 0
subctx, subcancel := context.WithCancel(ctx)
defer subcancel()
for _, sub := range rts.subs {
wg.Add(1)
go func(j int, s eventbus.Subscription) {
defer wg.Done()
for {
if ctx.Err() != nil {
if subctx.Err() != nil {
return
}
msg, err := s.Next(subctx)
if subctx.Err() != nil {
return
}
msg, err := s.Next(ctx)
assert.NoError(t, err)
if err != nil {
cancel()
t.Errorf("waiting for subscription: %v", err)
subcancel()
return
}
@ -273,12 +274,18 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}
}
}(i, sub)
i++
}
wg.Wait()
// don't run more assertions if we've encountered a timeout
select {
case <-subctx.Done():
t.Fatal("encountered timeout")
default:
}
pubkey, err := bzNodeState.privValidator.GetPubKey(ctx)
require.NoError(t, err)
@ -317,42 +324,42 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// blocksSubs := make([]types.Subscription, n)
// reactors := make([]p2p.Reactor, n)
// for i := 0; i < n; i++ {
// // enable txs so we can create different proposals
// assertMempool(states[i].txNotifier).EnableTxsAvailable()
// // enable txs so we can create different proposals
// assertMempool(states[i].txNotifier).EnableTxsAvailable()
// eventBus := states[i].eventBus
// eventBus.SetLogger(logger.With("module", "events", "validator", i))
// eventBus := states[i].eventBus
// eventBus.SetLogger(logger.With("module", "events", "validator", i))
// var err error
// blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock)
// require.NoError(t, err)
// var err error
// blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock)
// require.NoError(t, err)
// conR := NewReactor(states[i], true) // so we don't start the consensus states
// conR.SetLogger(logger.With("validator", i))
// conR.SetEventBus(eventBus)
// conR := NewReactor(states[i], true) // so we don't start the consensus states
// conR.SetLogger(logger.With("validator", i))
// conR.SetEventBus(eventBus)
// var conRI p2p.Reactor = conR
// var conRI p2p.Reactor = conR
// // make first val byzantine
// if i == 0 {
// conRI = NewByzantineReactor(conR)
// }
// // make first val byzantine
// if i == 0 {
// conRI = NewByzantineReactor(conR)
// }
// reactors[i] = conRI
// err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info
// require.NoError(t, err)
// reactors[i] = conRI
// err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info
// require.NoError(t, err)
// }
// switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch {
// sw.SetLogger(p2pLogger.With("validator", i))
// sw.AddReactor("CONSENSUS", reactors[i])
// return sw
// sw.SetLogger(p2pLogger.With("validator", i))
// sw.AddReactor("CONSENSUS", reactors[i])
// return sw
// }, func(sws []*p2p.Switch, i, j int) {
// // the network starts partitioned with globally active adversary
// if i != 0 {
// return
// }
// p2p.Connect2Switches(sws, i, j)
// // the network starts partitioned with globally active adversary
// if i != 0 {
// return
// }
// p2p.Connect2Switches(sws, i, j)
// })
// // make first val byzantine
@ -360,26 +367,26 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// // do any safety checks.
// states[0].privValidator.(types.MockPV).DisableChecks()
// states[0].decideProposal = func(j int32) func(int64, int32) {
// return func(height int64, round int32) {
// byzantineDecideProposalFunc(t, height, round, states[j], switches[j])
// }
// return func(height int64, round int32) {
// byzantineDecideProposalFunc(t, height, round, states[j], switches[j])
// }
// }(int32(0))
// // We are setting the prevote function to do nothing because the prevoting
// // and precommitting are done alongside the proposal.
// states[0].doPrevote = func(height int64, round int32) {}
// defer func() {
// for _, sw := range switches {
// err := sw.Stop()
// require.NoError(t, err)
// }
// for _, sw := range switches {
// err := sw.Stop()
// require.NoError(t, err)
// }
// }()
// // start the non-byz state machines.
// // note these must be started before the byz
// for i := 1; i < n; i++ {
// cr := reactors[i].(*Reactor)
// cr.SwitchToConsensus(cr.conS.GetState(), false)
// cr := reactors[i].(*Reactor)
// cr.SwitchToConsensus(cr.conS.GetState(), false)
// }
// // start the byzantine state machine
@ -411,146 +418,146 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// // (one of them already has)
// wg := new(sync.WaitGroup)
// for i := 1; i < N-1; i++ {
// wg.Add(1)
// go func(j int) {
// <-blocksSubs[j].Out()
// wg.Done()
// }(i)
// wg.Add(1)
// go func(j int) {
// <-blocksSubs[j].Out()
// wg.Done()
// }(i)
// }
// done := make(chan struct{})
// go func() {
// wg.Wait()
// close(done)
// wg.Wait()
// close(done)
// }()
// tick := time.NewTicker(time.Second * 10)
// select {
// case <-done:
// case <-tick.C:
// for i, reactor := range reactors {
// t.Log(fmt.Sprintf("Consensus Reactor %v", i))
// t.Log(fmt.Sprintf("%v", reactor))
// }
// t.Fatalf("Timed out waiting for all validators to commit first block")
// for i, reactor := range reactors {
// t.Log(fmt.Sprintf("Consensus Reactor %v", i))
// t.Log(fmt.Sprintf("%v", reactor))
// }
// t.Fatalf("Timed out waiting for all validators to commit first block")
// }
}
// func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
// // byzantine user should create two proposals and try to split the vote.
// // Avoid sending on internalMsgQueue and running consensus state.
// // Create a new proposal block from state/txs from the mempool.
// block1, blockParts1 := cs.createProposalBlock()
// polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
// proposal1 := types.NewProposal(height, round, polRound, propBlockID)
// p1 := proposal1.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
// t.Error(err)
// }
// proposal1.Signature = p1.Signature
// // some new transactions come in (this ensures that the proposals are different)
// deliverTxsRange(cs, 0, 1)
// // Create a new proposal block from state/txs from the mempool.
// block2, blockParts2 := cs.createProposalBlock()
// polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
// proposal2 := types.NewProposal(height, round, polRound, propBlockID)
// p2 := proposal2.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
// t.Error(err)
// }
// proposal2.Signature = p2.Signature
// block1Hash := block1.Hash()
// block2Hash := block2.Hash()
// // broadcast conflicting proposals/block parts to peers
// peers := sw.Peers().List()
// t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
// for i, peer := range peers {
// if i < len(peers)/2 {
// go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
// } else {
// go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
// }
// }
// // byzantine user should create two proposals and try to split the vote.
// // Avoid sending on internalMsgQueue and running consensus state.
// // Create a new proposal block from state/txs from the mempool.
// block1, blockParts1 := cs.createProposalBlock()
// polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
// proposal1 := types.NewProposal(height, round, polRound, propBlockID)
// p1 := proposal1.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
// t.Error(err)
// }
// proposal1.Signature = p1.Signature
// // some new transactions come in (this ensures that the proposals are different)
// deliverTxsRange(cs, 0, 1)
// // Create a new proposal block from state/txs from the mempool.
// block2, blockParts2 := cs.createProposalBlock()
// polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
// proposal2 := types.NewProposal(height, round, polRound, propBlockID)
// p2 := proposal2.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
// t.Error(err)
// }
// proposal2.Signature = p2.Signature
// block1Hash := block1.Hash()
// block2Hash := block2.Hash()
// // broadcast conflicting proposals/block parts to peers
// peers := sw.Peers().List()
// t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
// for i, peer := range peers {
// if i < len(peers)/2 {
// go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
// } else {
// go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
// }
// }
// }
// func sendProposalAndParts(
// height int64,
// round int32,
// cs *State,
// peer p2p.Peer,
// proposal *types.Proposal,
// blockHash []byte,
// parts *types.PartSet,
// height int64,
// round int32,
// cs *State,
// peer p2p.Peer,
// proposal *types.Proposal,
// blockHash []byte,
// parts *types.PartSet,
// ) {
// // proposal
// msg := &ProposalMessage{Proposal: proposal}
// peer.Send(DataChannel, MustEncode(msg))
// // parts
// for i := 0; i < int(parts.Total()); i++ {
// part := parts.GetPart(i)
// msg := &BlockPartMessage{
// Height: height, // This tells peer that this part applies to us.
// Round: round, // This tells peer that this part applies to us.
// Part: part,
// }
// peer.Send(DataChannel, MustEncode(msg))
// }
// // votes
// cs.mtx.Lock()
// prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
// precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
// cs.mtx.Unlock()
// peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
// peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
// // proposal
// msg := &ProposalMessage{Proposal: proposal}
// peer.Send(DataChannel, MustEncode(msg))
// // parts
// for i := 0; i < int(parts.Total()); i++ {
// part := parts.GetPart(i)
// msg := &BlockPartMessage{
// Height: height, // This tells peer that this part applies to us.
// Round: round, // This tells peer that this part applies to us.
// Part: part,
// }
// peer.Send(DataChannel, MustEncode(msg))
// }
// // votes
// cs.mtx.Lock()
// prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
// precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
// cs.mtx.Unlock()
// peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
// peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
// }
// type ByzantineReactor struct {
// service.Service
// reactor *Reactor
// service.Service
// reactor *Reactor
// }
// func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
// return &ByzantineReactor{
// Service: conR,
// reactor: conR,
// }
// return &ByzantineReactor{
// Service: conR,
// reactor: conR,
// }
// }
// func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
// func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
// func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
// if !br.reactor.IsRunning() {
// return
// }
// // Create peerState for peer
// peerState := NewPeerState(peer).SetLogger(br.reactor.logger)
// peer.Set(types.PeerStateKey, peerState)
// // Send our state to peer.
// // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
// if !br.reactor.waitSync {
// br.reactor.sendNewRoundStepMessage(peer)
// }
// if !br.reactor.IsRunning() {
// return
// }
// // Create peerState for peer
// peerState := NewPeerState(peer).SetLogger(br.reactor.logger)
// peer.Set(types.PeerStateKey, peerState)
// // Send our state to peer.
// // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
// if !br.reactor.waitSync {
// br.reactor.sendNewRoundStepMessage(peer)
// }
// }
// func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// br.reactor.RemovePeer(peer, reason)
// br.reactor.RemovePeer(peer, reason)
// }
// func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
// br.reactor.Receive(chID, peer, msgBytes)
// br.reactor.Receive(chID, peer, msgBytes)
// }
// func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

+ 27
- 12
internal/consensus/common_test.go View File

@ -69,6 +69,9 @@ func configSetup(t *testing.T) *config.Config {
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) })
walDir := filepath.Dir(cfg.Consensus.WalFile())
ensureDir(t, walDir, 0700)
return cfg
}
@ -370,7 +373,11 @@ func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte)
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
select {
case <-ctx.Done():
return ctx.Err()
case ch <- msg:
}
}
return nil
}, types.EventQueryVote); err != nil {
@ -401,7 +408,10 @@ func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
select {
case <-ctx.Done():
case ch <- msg:
}
}
}
}()
@ -462,7 +472,6 @@ func newStateWithConfigAndBlockStore(
logger.With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
@ -476,22 +485,26 @@ func newStateWithConfigAndBlockStore(
stateStore := sm.NewStore(stateDB)
require.NoError(t, stateStore.Save(state))
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx,
eventBus := eventbus.NewDefault(logger.With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx,
logger.With("module", "consensus"),
thisConfig.Consensus,
state,
stateStore,
blockExec,
blockStore,
mempool,
evpool,
eventBus,
)
cs.SetPrivValidator(ctx, pv)
if err != nil {
t.Fatal(err)
}
eventBus := eventbus.NewDefault(logger.With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
cs.SetPrivValidator(ctx, pv)
cs.SetEventBus(eventBus)
return cs
}
@ -775,6 +788,7 @@ func makeConsensusState(
configOpts ...func(*config.Config),
) ([]*State, cleanupFunc) {
t.Helper()
tempDir := t.TempDir()
valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
@ -789,7 +803,7 @@ func makeConsensusState(
blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(tempDir, fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@ -798,7 +812,8 @@ func makeConsensusState(
opt(thisConfig)
}
ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
walDir := filepath.Dir(thisConfig.Consensus.WalFile())
ensureDir(t, walDir, 0700)
app := kvstore.NewApplication()
closeFuncs = append(closeFuncs, app.Close)


+ 33
- 10
internal/consensus/invalid_test.go View File

@ -5,6 +5,7 @@ import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -20,7 +21,7 @@ import (
)
func TestReactorInvalidPrecommit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
config := configSetup(t)
@ -49,14 +50,14 @@ func TestReactorInvalidPrecommit(t *testing.T) {
byzState := rts.states[node.NodeID]
byzReactor := rts.reactors[node.NodeID]
calledDoPrevote := false
signal := make(chan struct{})
// Update the doPrevote function to just send a valid precommit for a random
// block and otherwise disable the priv validator.
byzState.mtx.Lock()
privVal := byzState.privValidator
byzState.doPrevote = func(ctx context.Context, height int64, round int32) {
defer close(signal)
invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal)
calledDoPrevote = true
}
byzState.mtx.Unlock()
@ -72,16 +73,30 @@ func TestReactorInvalidPrecommit(t *testing.T) {
go func(s eventbus.Subscription) {
defer wg.Done()
_, err := s.Next(ctx)
if ctx.Err() != nil {
return
}
if !assert.NoError(t, err) {
cancel() // cancel other subscribers on failure
}
}(sub)
}
}
wait := make(chan struct{})
go func() { defer close(wait); wg.Wait() }()
wg.Wait()
if !calledDoPrevote {
t.Fatal("test failed to run core logic")
select {
case <-wait:
if _, ok := <-signal; !ok {
t.Fatal("test condition did not fire")
}
case <-ctx.Done():
if _, ok := <-signal; !ok {
t.Fatal("test condition did not fire after timeout")
return
}
case <-signal:
// test passed
}
}
@ -130,19 +145,27 @@ func invalidDoPrevoteFunc(
cs.privValidator = nil // disable priv val so we don't do normal votes
cs.mtx.Unlock()
count := 0
r.mtx.Lock()
ids := make([]types.NodeID, 0, len(r.peers))
for _, ps := range r.peers {
ids = append(ids, ps.peerID)
}
r.mtx.Unlock()
count := 0
for _, peerID := range ids {
count++
err := r.voteCh.Send(ctx, p2p.Envelope{
To: ps.peerID,
To: peerID,
Message: &tmcons.Vote{
Vote: precommit.ToProto(),
},
})
// we want to have sent some of these votes,
// but if the test completes without erroring
// and we get here, we shouldn't error
if errors.Is(err, context.Canceled) && count > 1 {
// or not sending any messages, then we should
// error.
if errors.Is(err, context.Canceled) && count > 0 {
break
}
require.NoError(t, err)


+ 11
- 11
internal/consensus/mempool_test.go View File

@ -51,7 +51,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh)
deliverTxsRange(ctx, t, cs, 0, 1)
checkTxsRange(ctx, t, cs, 0, 1)
ensureNewEventOnChannel(t, newBlockCh) // commit txs
ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(t, newBlockCh)
@ -118,7 +118,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
round = 0
ensureNewRound(t, newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
checkTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round
ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
round++ // moving to the next round
@ -126,7 +126,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block
}
func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
t.Helper()
// Deliver some txs.
for i := start; i < end; i++ {
@ -159,7 +159,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader)
const numTxs int64 = 3000
go deliverTxsRange(ctx, t, cs, 0, int(numTxs))
go checkTxsRange(ctx, t, cs, 0, int(numTxs))
startTestRound(ctx, cs, cs.Height, cs.Round)
for n := int64(0); n < numTxs; {
@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resFinalize := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. got %v", resFinalize))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@ -212,7 +212,7 @@ func TestMempoolRmBadTx(t *testing.T) {
checkTxRespCh <- struct{}{}
}, mempool.TxInfo{})
if err != nil {
t.Errorf("error after CheckTx: %w", err)
t.Errorf("error after CheckTx: %v", err)
return
}
@ -265,20 +265,20 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
}
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
respTxs := make([]*abci.ExecTxResult, len(req.Txs))
for i, tx := range req.Txs {
txValue := txAsUint64(tx)
if txValue != uint64(app.txCount) {
respTxs[i] = &abci.ResponseDeliverTx{
respTxs[i] = &abci.ExecTxResult{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue),
}
continue
}
app.txCount++
respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
}
return abci.ResponseFinalizeBlock{Txs: respTxs}
return abci.ResponseFinalizeBlock{TxResults: respTxs}
}
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {


+ 2
- 6
internal/consensus/reactor.go View File

@ -138,6 +138,7 @@ func NewReactor(
cs *State,
channelCreator p2p.ChannelCreator,
peerUpdates *p2p.PeerUpdates,
eventBus *eventbus.EventBus,
waitSync bool,
metrics *Metrics,
) (*Reactor, error) {
@ -166,6 +167,7 @@ func NewReactor(
state: cs,
waitSync: waitSync,
peers: make(map[types.NodeID]*PeerState),
eventBus: eventBus,
Metrics: metrics,
stateCh: stateCh,
dataCh: dataCh,
@ -226,12 +228,6 @@ func (r *Reactor) OnStop() {
}
}
// SetEventBus sets the reactor's event bus.
func (r *Reactor) SetEventBus(b *eventbus.EventBus) {
r.eventBus = b
r.state.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (r *Reactor) WaitSync() bool {
r.mtx.RLock()


+ 102
- 25
internal/consensus/reactor_test.go View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"os"
"path"
"sync"
"testing"
"time"
@ -110,13 +109,12 @@ func setup(
state,
chCreator(nodeID),
node.MakePeerUpdates(ctx, t),
state.eventBus,
true,
NopMetrics(),
)
require.NoError(t, err)
reactor.SetEventBus(state.eventBus)
blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
ClientID: testSubscriber,
Query: types.EventQueryNewBlock,
@ -188,10 +186,17 @@ func waitForAndValidateBlock(
ctx, cancel := context.WithCancel(bctx)
defer cancel()
fn := func(j int) {
msg, err := blocksSubs[j].Next(ctx)
if !assert.NoError(t, err) {
cancel()
switch {
case errors.Is(err, context.DeadlineExceeded):
return
case errors.Is(err, context.Canceled):
return
case err != nil:
cancel() // terminate other workers
require.NoError(t, err)
return
}
@ -217,6 +222,10 @@ func waitForAndValidateBlock(
}
wg.Wait()
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
}
}
func waitForAndValidateBlockWithTx(
@ -236,8 +245,14 @@ func waitForAndValidateBlockWithTx(
ntxs := 0
for {
msg, err := blocksSubs[j].Next(ctx)
if !assert.NoError(t, err) {
cancel()
switch {
case errors.Is(err, context.DeadlineExceeded):
return
case errors.Is(err, context.Canceled):
return
case err != nil:
cancel() // terminate other workers
t.Fatalf("problem waiting for %d subscription: %v", j, err)
return
}
@ -268,6 +283,9 @@ func waitForAndValidateBlockWithTx(
}
wg.Wait()
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
}
}
func waitForBlockWithUpdatedValsAndValidateIt(
@ -287,8 +305,14 @@ func waitForBlockWithUpdatedValsAndValidateIt(
for {
msg, err := blocksSubs[j].Next(ctx)
if !assert.NoError(t, err) {
cancel()
switch {
case errors.Is(err, context.DeadlineExceeded):
return
case errors.Is(err, context.Canceled):
return
case err != nil:
cancel() // terminate other workers
t.Fatalf("problem waiting for %d subscription: %v", j, err)
return
}
@ -311,6 +335,9 @@ func waitForBlockWithUpdatedValsAndValidateIt(
}
wg.Wait()
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
}
}
func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
@ -342,6 +369,8 @@ func TestReactorBasic(t *testing.T) {
}
var wg sync.WaitGroup
errCh := make(chan error, len(rts.subs))
for _, sub := range rts.subs {
wg.Add(1)
@ -349,14 +378,32 @@ func TestReactorBasic(t *testing.T) {
go func(s eventbus.Subscription) {
defer wg.Done()
_, err := s.Next(ctx)
if !assert.NoError(t, err) {
cancel()
switch {
case errors.Is(err, context.DeadlineExceeded):
return
case errors.Is(err, context.Canceled):
return
case err != nil:
errCh <- err
cancel() // terminate other workers
return
}
}(sub)
}
wg.Wait()
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
}
select {
case err := <-errCh:
if err != nil {
t.Fatal(err)
}
default:
}
errCh = make(chan error, len(rts.blocksyncSubs))
for _, sub := range rts.blocksyncSubs {
wg.Add(1)
@ -364,8 +411,14 @@ func TestReactorBasic(t *testing.T) {
go func(s eventbus.Subscription) {
defer wg.Done()
msg, err := s.Next(ctx)
if !assert.NoError(t, err) {
cancel()
switch {
case errors.Is(err, context.DeadlineExceeded):
return
case errors.Is(err, context.Canceled):
return
case err != nil:
errCh <- err
cancel() // terminate other workers
return
}
ensureBlockSyncStatus(t, msg, true, 0)
@ -373,6 +426,17 @@ func TestReactorBasic(t *testing.T) {
}
wg.Wait()
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
}
select {
case err := <-errCh:
if err != nil {
t.Fatal(err)
}
default:
}
}
func TestReactorWithEvidence(t *testing.T) {
@ -395,12 +459,12 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := kvstore.NewApplication()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@ -417,7 +481,6 @@ func TestReactorWithEvidence(t *testing.T) {
log.TestingLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
@ -438,15 +501,15 @@ func TestReactorWithEvidence(t *testing.T) {
evpool2 := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetPrivValidator(ctx, pv)
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
cs.SetEventBus(eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus)
require.NoError(t, err)
cs.SetPrivValidator(ctx, pv)
cs.SetTimeoutTicker(tickerFunc())
@ -499,7 +562,6 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
c.Consensus.CreateEmptyBlocks = false
},
)
t.Cleanup(cleanup)
rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock
@ -709,7 +771,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
}
func TestReactorValidatorSetChanges(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
cfg := configSetup(t)
@ -752,7 +814,11 @@ func TestReactorValidatorSetChanges(t *testing.T) {
go func(s eventbus.Subscription) {
defer wg.Done()
_, err := s.Next(ctx)
if !assert.NoError(t, err) {
switch {
case err == nil:
case errors.Is(err, context.DeadlineExceeded):
default:
t.Log(err)
cancel()
}
}(sub)
@ -760,6 +826,17 @@ func TestReactorValidatorSetChanges(t *testing.T) {
wg.Wait()
// after the wait returns, either there was an error with a
// subscription (very unlikely, and causes the context to be
// canceled manually), there was a timeout and the test's root context
// was canceled (somewhat likely,) or the test can proceed
// (common.)
if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {
t.Fatal("encountered timeout")
} else if errors.Is(err, context.Canceled) {
t.Fatal("subscription encountered unexpected error")
}
newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx)
require.NoError(t, err)


+ 21
- 20
internal/consensus/replay.go View File

@ -10,6 +10,7 @@ import (
"reflect"
"time"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/internal/eventbus"
@ -204,7 +205,7 @@ type Handshaker struct {
stateStore sm.Store
initialState sm.State
store sm.BlockStore
eventBus types.BlockEventPublisher
eventBus *eventbus.EventBus
genDoc *types.GenesisDoc
logger log.Logger
@ -216,7 +217,7 @@ func NewHandshaker(
stateStore sm.Store,
state sm.State,
store sm.BlockStore,
eventBus types.BlockEventPublisher,
eventBus *eventbus.EventBus,
genDoc *types.GenesisDoc,
) *Handshaker {
@ -237,10 +238,10 @@ func (h *Handshaker) NBlocks() int {
}
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error {
func (h *Handshaker) Handshake(ctx context.Context, appClient abciclient.Client) error {
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo)
res, err := appClient.Info(ctx, proxy.RequestInfo)
if err != nil {
return fmt.Errorf("error calling Info: %w", err)
}
@ -264,7 +265,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err
}
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp)
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, appClient)
if err != nil {
return fmt.Errorf("error on replay: %w", err)
}
@ -285,7 +286,7 @@ func (h *Handshaker) ReplayBlocks(
state sm.State,
appHash []byte,
appBlockHeight int64,
proxyApp proxy.AppConns,
appClient abciclient.Client,
) ([]byte, error) {
storeBlockBase := h.store.Base()
storeBlockHeight := h.store.Height()
@ -316,7 +317,7 @@ func (h *Handshaker) ReplayBlocks(
Validators: nextVals,
AppStateBytes: h.genDoc.AppState,
}
res, err := proxyApp.Consensus().InitChain(ctx, req)
res, err := appClient.InitChain(ctx, req)
if err != nil {
return nil, err
}
@ -390,7 +391,7 @@ func (h *Handshaker) ReplayBlocks(
// Either the app is asking for replay, or we're all synced up.
if appBlockHeight < storeBlockHeight {
// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false)
return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, false)
} else if appBlockHeight == storeBlockHeight {
// We're good!
@ -405,7 +406,7 @@ func (h *Handshaker) ReplayBlocks(
case appBlockHeight < stateBlockHeight:
// the app is further behind than it should be, so replay blocks
// but leave the last block to go through the WAL
return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true)
return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, true)
case appBlockHeight == stateBlockHeight:
// We haven't run Commit (both the state and app are one block behind),
@ -413,7 +414,7 @@ func (h *Handshaker) ReplayBlocks(
// NOTE: We could instead use the cs.WAL on cs.Start,
// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
h.logger.Info("Replay last block using real app")
state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus())
state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient)
return state.AppHash, err
case appBlockHeight == storeBlockHeight:
@ -426,6 +427,9 @@ func (h *Handshaker) ReplayBlocks(
if err != nil {
return nil, err
}
if err := mockApp.Start(ctx); err != nil {
return nil, err
}
h.logger.Info("Replay last block using mock app")
state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp)
@ -445,7 +449,7 @@ func (h *Handshaker) ReplayBlocks(
func (h *Handshaker) replayBlocks(
ctx context.Context,
state sm.State,
proxyApp proxy.AppConns,
appClient abciclient.Client,
appBlockHeight,
storeBlockHeight int64,
mutateState bool) ([]byte, error) {
@ -480,17 +484,15 @@ func (h *Handshaker) replayBlocks(
if i == finalBlock && !mutateState {
// We emit events for the index services at the final block due to the sync issue when
// the node shutdown during the block committing status.
blockExec := sm.NewBlockExecutor(
h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec.SetEventBus(h.eventBus)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
appHash, err = sm.ExecCommitBlock(ctx,
blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
blockExec, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
if err != nil {
return nil, err
}
} else {
appHash, err = sm.ExecCommitBlock(ctx,
nil, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
nil, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
if err != nil {
return nil, err
}
@ -501,7 +503,7 @@ func (h *Handshaker) replayBlocks(
if mutateState {
// sync the final block
state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus())
state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient)
if err != nil {
return nil, err
}
@ -517,15 +519,14 @@ func (h *Handshaker) replayBlock(
ctx context.Context,
state sm.State,
height int64,
proxyApp proxy.AppConnConsensus,
appClient abciclient.Client,
) (sm.State, error) {
block := h.store.LoadBlock(height)
meta := h.store.LoadBlockMeta(height)
// Use stubs for both mempool and evidence pool since no transactions nor
// evidence are needed here - block already exists.
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec.SetEventBus(h.eventBus)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
var err error
state, err = blockExec.ApplyBlock(ctx, state, meta.BlockID, block)


+ 26
- 20
internal/consensus/replay_file.go View File

@ -84,7 +84,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro
return err
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
pb := newPlayback(file, fp, cs, cs.stateStore)
defer pb.fp.Close()
var nextN int // apply N msgs in a row
@ -126,17 +126,17 @@ type playback struct {
count int // how many lines/msgs into the file are we
// replays can be reset to beginning
fileName string // so we can close/reopen the file
genesisState sm.State // so the replay session knows where to restart from
fileName string // so we can close/reopen the file
stateStore sm.Store
}
func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback {
func newPlayback(fileName string, fp *os.File, cs *State, store sm.Store) *playback {
return &playback{
cs: cs,
fp: fp,
fileName: fileName,
genesisState: genState,
dec: NewWALDecoder(fp),
cs: cs,
fp: fp,
fileName: fileName,
stateStore: store,
dec: NewWALDecoder(fp),
}
}
@ -145,9 +145,11 @@ func (pb *playback) replayReset(ctx context.Context, count int, newStepSub event
pb.cs.Stop()
pb.cs.Wait()
newCS := NewState(ctx, pb.cs.logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS.SetEventBus(pb.cs.eventBus)
newCS, err := NewState(ctx, pb.cs.logger, pb.cs.config, pb.stateStore, pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, pb.cs.eventBus)
if err != nil {
return err
}
newCS.startForReplay()
if err := pb.fp.Close(); err != nil {
@ -323,9 +325,12 @@ func newConsensusStateForReplay(
return nil, err
}
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
if err != nil {
return nil, err
}
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err = proxyApp.Start(ctx)
if err != nil {
return nil, fmt.Errorf("starting proxy app conns: %w", err)
@ -343,11 +348,12 @@ func newConsensusStateForReplay(
}
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(ctx, logger, csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus)
consensusState.SetEventBus(eventBus)
consensusState, err := NewState(ctx, logger, csConfig, stateStore, blockExec,
blockStore, mempool, evpool, eventBus)
if err != nil {
return nil, err
}
return consensusState, nil
}

+ 4
- 15
internal/consensus/replay_stubs.go View File

@ -32,7 +32,7 @@ func (emptyMempool) Update(
_ context.Context,
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ []*abci.ExecTxResult,
_ mempool.PreCheckFunc,
_ mempool.PostCheckFunc,
) error {
@ -61,22 +61,11 @@ func newMockProxyApp(
logger log.Logger,
appHash []byte,
abciResponses *tmstate.ABCIResponses,
) (proxy.AppConnConsensus, error) {
clientCreator := abciclient.NewLocalCreator(&mockProxyApp{
) (abciclient.Client, error) {
return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{
appHash: appHash,
abciResponses: abciResponses,
})
cli, err := clientCreator(logger)
if err != nil {
return nil, err
}
if err = cli.Start(ctx); err != nil {
return nil, err
}
return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()), nil
}), logger, proxy.NopMetrics()), nil
}
type mockProxyApp struct {


+ 46
- 87
internal/consensus/replay_test.go View File

@ -35,7 +35,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@ -652,61 +651,6 @@ func TestHandshakeReplayNone(t *testing.T) {
}
}
// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sim := setupSimulator(ctx, t) // setup config and simulator
cfg := sim.Config
assert.NotNil(t, cfg)
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
txCount := 0
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
require.NoError(t, err)
loadedAbciRes := new(tmstate.ABCIResponses)
// this also happens sm.LoadABCIResponses
err = proto.Unmarshal(bytes, loadedAbciRes)
require.NoError(t, err)
mock, err := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes)
require.NoError(t, err)
abciRes := new(tmstate.ABCIResponses)
abciRes.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
someTx := []byte("tx")
resp, err := mock.FinalizeBlock(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
require.NoError(t, err)
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
for _, tx := range resp.Txs {
if tx.Code == abci.CodeTypeOK {
validTxs++
} else {
invalidTxs++
}
txCount++
}
})
require.Equal(t, 1, txCount)
require.Equal(t, 1, validTxs)
require.Zero(t, invalidTxs)
}
func tempWALWithData(t *testing.T, data []byte) string {
t.Helper()
@ -804,16 +748,19 @@ func testHandshakeReplay(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) })
clientCreator2 := abciclient.NewLocalCreator(kvstoreApp)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
clientCreator2 := abciclient.NewLocalClient(logger, kvstoreApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, eventBus, nBlocks, mode, store)
}
// Prune block store if requested
@ -828,10 +775,11 @@ func testHandshakeReplay(
// now start the app using the handshake - it should sync
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
require.True(t, proxyApp.IsRunning())
require.NotNil(t, proxyApp)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
err = handshaker.Handshake(ctx, proxyApp)
@ -842,7 +790,7 @@ func testHandshakeReplay(
require.NoError(t, err, "Error on abci handshake")
// get the latest app hash from the app
res, err := proxyApp.Query().Info(ctx, abci.RequestInfo{Version: ""})
res, err := proxyApp.Info(ctx, abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
@ -875,11 +823,12 @@ func applyBlock(
evpool sm.EvidencePool,
st sm.State,
blk *types.Block,
proxyApp proxy.AppConns,
appClient abciclient.Client,
blockStore *mockBlockStore,
eventBus *eventbus.EventBus,
) sm.State {
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mempool, evpool, blockStore, eventBus)
bps, err := blk.MakePartSet(testPartSize)
require.NoError(t, err)
@ -892,23 +841,24 @@ func applyBlock(
func buildAppStateFromChain(
ctx context.Context,
t *testing.T,
proxyApp proxy.AppConns,
appClient abciclient.Client,
stateStore sm.Store,
mempool mempool.Mempool,
evpool sm.EvidencePool,
state sm.State,
chain []*types.Block,
eventBus *eventbus.EventBus,
nBlocks int,
mode uint,
blockStore *mockBlockStore,
) {
t.Helper()
// start a new app without handshake, play nBlocks blocks
require.NoError(t, proxyApp.Start(ctx))
require.NoError(t, appClient.Start(ctx))
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
_, err := appClient.InitChain(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)
@ -919,18 +869,18 @@ func buildAppStateFromChain(
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
}
case 1, 2, 3:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
}
if mode == 2 || mode == 3 {
// update the kvstore height and apphash
// as if we ran commit but not
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], appClient, blockStore, eventBus)
}
default:
require.Fail(t, "unknown mode %v", mode)
@ -958,37 +908,40 @@ func buildTMStateFromChain(
kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger,
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
defer kvstoreApp.Close()
clientCreator := abciclient.NewLocalCreator(kvstoreApp)
client := abciclient.NewLocalClient(logger, kvstoreApp)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
_, err := proxyApp.InitChain(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
switch mode {
case 0:
// sync right up
for _, block := range chain {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
}
case 1, 2, 3:
// sync up to the penultimate block as if we had stored it.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore, eventBus)
default:
require.Fail(t, "unknown mode %v", mode)
}
@ -1025,20 +978,23 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
logger := log.TestingLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
// 2. Tendermint must panic if app returns wrong hash for the first block
// - RANDOM HASH
// - 0x02
// - 0x03
{
app := &badApp{numBlocks: 3, allHashesAreWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
assert.Panics(t, func() {
h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
if err = h.Handshake(ctx, proxyApp); err != nil {
t.Log(err)
}
@ -1051,14 +1007,14 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
// - RANDOM HASH
{
app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
assert.Panics(t, func() {
h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
if err = h.Handshake(ctx, proxyApp); err != nil {
t.Log(err)
}
@ -1282,12 +1238,16 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
votePower := 10 + int64(rand.Uint32())
val, _, err := factory.Validator(ctx, votePower)
require.NoError(t, err)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)
client := abciclient.NewLocalClient(logger, app)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
require.NoError(t, err)
@ -1306,9 +1266,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
logger := log.TestingLogger()
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake")
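The recurring change in this file is the client wiring: abciclient.NewLocalCreator plus proxy.NewAppConns is replaced by abciclient.NewLocalClient plus proxy.New, and the handshaker now takes a real event bus instead of eventbus.NopEventBus. A condensed sketch of the new wiring, using only constructors that appear in this diff and assuming the test-scoped variables (logger, ctx, t, stateStore, state, store, genDoc) are already in scope:

app := kvstore.NewApplication()
client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))

eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))

handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
require.NoError(t, handshaker.Handshake(ctx, proxyApp))

// ABCI calls now go directly through the single multiplexed client, e.g.:
res, err := proxyApp.Info(ctx, abci.RequestInfo{Version: ""})
require.NoError(t, err)
_ = res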


+ 64
- 31
internal/consensus/state.go

@ -20,6 +20,7 @@ import (
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/internal/libs/autofile"
sm "github.com/tendermint/tendermint/internal/state"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
@ -121,6 +122,9 @@ type State struct {
// store blocks and commits
blockStore sm.BlockStore
stateStore sm.Store
initialStatePopulated bool
// create and execute blocks
blockExec *sm.BlockExecutor
@ -189,18 +193,21 @@ func NewState(
ctx context.Context,
logger log.Logger,
cfg *config.ConsensusConfig,
state sm.State,
store sm.Store,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
txNotifier txNotifier,
evpool evidencePool,
eventBus *eventbus.EventBus,
options ...StateOption,
) *State {
) (*State, error) {
cs := &State{
eventBus: eventBus,
logger: logger,
config: cfg,
blockExec: blockExec,
blockStore: blockStore,
stateStore: store,
txNotifier: txNotifier,
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
@ -220,27 +227,40 @@ func NewState(
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
if err := cs.updateStateFromStore(ctx); err != nil {
return nil, err
}
cs.updateToState(ctx, state)
// NOTE: we do not call scheduleRound0 yet, we do that upon Start()
cs.BaseService = *service.NewBaseService(logger, "State", cs)
for _, option := range options {
option(cs)
}
return cs
return cs, nil
}
// SetEventBus sets event bus.
func (cs *State) SetEventBus(b *eventbus.EventBus) {
cs.eventBus = b
cs.blockExec.SetEventBus(b)
func (cs *State) updateStateFromStore(ctx context.Context) error {
if cs.initialStatePopulated {
return nil
}
state, err := cs.stateStore.Load()
if err != nil {
return fmt.Errorf("loading state: %w", err)
}
if state.IsEmpty() {
return nil
}
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}
cs.updateToState(ctx, state)
cs.initialStatePopulated = true
return nil
}
// StateMetrics sets the metrics.
@ -365,6 +385,10 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart(ctx context.Context) error {
if err := cs.updateStateFromStore(ctx); err != nil {
return err
}
// We may set the WAL in testing before calling Start, so only OpenWAL if it's
// still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok {
@ -846,15 +870,27 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
defer func() {
if r := recover(); r != nil {
cs.logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
// stop gracefully
//
// NOTE: We most probably shouldn't be running any further when there is
// some unexpected panic. Some unknown error happened, and so we don't
// know if that will result in the validator signing an invalid thing. It
// might be worthwhile to explore a mechanism for manual resuming via
// some console or secure RPC system, but for now, halting the chain upon
// unexpected consensus bugs sounds like the better option.
// Make a best-effort attempt to close the WAL, but otherwise do not
// attempt to gracefully terminate. Once consensus has irrecoverably
// failed, any additional progress we permit the node to make may
// complicate diagnosing and recovering from the failure.
onExit(cs)
// Re-panic to ensure the node terminates.
//
// TODO(creachadair): In ordinary operation, the WAL autofile should
// never be closed. This only happens during shutdown and production
// nodes usually halt by panicking. Many existing tests, however,
// assume a clean shutdown is possible. Prior to #8111, we were
// swallowing the panic in receiveRoutine, making that appear to
// work. Filtering this specific error is slightly risky, but should
// affect only unit tests. In any case, not re-panicking here only
// preserves the pre-existing behavior for this one error type.
if err, ok := r.(error); ok && errors.Is(err, autofile.ErrAutoFileClosed) {
return
}
panic(r)
}
}()
@ -867,14 +903,11 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
}
}
rs := cs.RoundState
var mi msgInfo
select {
case <-cs.txNotifier.TxsAvailable():
cs.handleTxsAvailable(ctx)
case mi = <-cs.peerMsgQueue:
case mi := <-cs.peerMsgQueue:
if err := cs.wal.Write(mi); err != nil {
cs.logger.Error("failed writing to WAL", "err", err)
}
@ -883,11 +916,11 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
// may generate internal events (votes, complete proposals, 2/3 majorities)
cs.handleMsg(ctx, mi)
case mi = <-cs.internalMsgQueue:
case mi := <-cs.internalMsgQueue:
err := cs.wal.WriteSync(mi) // NOTE: fsync
if err != nil {
panic(fmt.Sprintf(
"failed to write %v msg to consensus WAL due to %v; check your file system and restart the node",
panic(fmt.Errorf(
"failed to write %v msg to consensus WAL due to %w; check your file system and restart the node",
mi, err,
))
}
@ -902,7 +935,7 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
// if the timeout is relevant to the rs
// go to the next step
cs.handleTimeout(ctx, ti, rs)
cs.handleTimeout(ctx, ti, cs.RoundState)
case <-ctx.Done():
onExit(cs)
@ -1880,8 +1913,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
// restart).
endMsg := EndHeightMessage{height}
if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync
panic(fmt.Sprintf(
"failed to write %v msg to consensus WAL due to %v; check your file system and restart the node",
panic(fmt.Errorf(
"failed to write %v msg to consensus WAL due to %w; check your file system and restart the node",
endMsg, err,
))
}
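Taken together, the changes above alter how a consensus State is constructed: NewState receives the state store and the event bus directly, loads its initial state lazily via updateStateFromStore (re-checked in OnStart), and returns an error instead of relying on a later SetEventBus call. A minimal sketch of the new construction path, assuming the surrounding dependencies (cfg, logger, stores, proxyApp, mempool, evpool, eventBus) are already set up:

blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger, cfg.Consensus,
	stateStore, // replaces the explicit sm.State argument
	blockExec,
	blockStore,
	mempool, // txNotifier
	evpool,
	eventBus, // replaces the former SetEventBus call
)
if err != nil {
	return err // e.g. the state store could not be loaded
}
if err := cs.Start(ctx); err != nil { // OnStart re-runs updateStateFromStore before starting
	return err
}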


+ 73
- 0
internal/consensus/state_test.go

@ -1965,6 +1965,79 @@ func TestProcessProposalAccept(t *testing.T) {
}
}
func TestFinalizeBlockCalled(t *testing.T) {
for _, testCase := range []struct {
name string
voteNil bool
expectCalled bool
}{
{
name: "finalze block called when block committed",
voteNil: false,
expectCalled: true,
},
{
name: "not called when block not committed",
voteNil: true,
expectCalled: false,
},
} {
t.Run(testCase.name, func(t *testing.T) {
config := configSetup(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
m := abcimocks.NewBaseMock()
m.On("ProcessProposal", mock.Anything).Return(abcitypes.ResponseProcessProposal{Accept: true})
m.On("VerifyVoteExtension", mock.Anything).Return(abcitypes.ResponseVerifyVoteExtension{
Result: abcitypes.ResponseVerifyVoteExtension_ACCEPT,
})
m.On("FinalizeBlock", mock.Anything).Return(abcitypes.ResponseFinalizeBlock{}).Maybe()
cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m})
height, round := cs1.Height, cs1.Round
proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
pv1, err := cs1.privValidator.GetPubKey(ctx)
require.NoError(t, err)
addr := pv1.Address()
voteCh := subscribeToVoter(ctx, t, cs1, addr)
startTestRound(ctx, cs1, cs1.Height, round)
ensureNewRound(t, newRoundCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs := cs1.GetRoundState()
blockID := types.BlockID{}
nextRound := round + 1
nextHeight := height
if !testCase.voteNil {
nextRound = 0
nextHeight = height + 1
blockID = types.BlockID{
Hash: rs.ProposalBlock.Hash(),
PartSetHeader: rs.ProposalBlockParts.Header(),
}
}
signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...)
ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash())
signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...)
ensurePrecommit(t, voteCh, height, round)
ensureNewRound(t, newRoundCh, nextHeight, nextRound)
m.AssertExpectations(t)
if !testCase.expectCalled {
m.AssertNotCalled(t, "FinalizeBlock", mock.Anything)
} else {
m.AssertCalled(t, "FinalizeBlock", mock.Anything)
}
})
}
}
// 4 vals, 3 Nil Precommits at P0
// What we want:
// P0 waits for timeoutPrecommit before starting next round


+ 30
- 23
internal/consensus/wal_generator.go

@ -30,8 +30,10 @@ import (
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) (err error) {
// If the node fails to produce the given numBlocks, it fails the test.
func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) {
t.Helper()
cfg := getConfig(t)
app := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), "wal_generator"))
@ -46,41 +48,46 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
privValidatorStateFile := cfg.PrivValidator.StateFile()
privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
if err != nil {
return err
t.Fatal(err)
}
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
return fmt.Errorf("failed to read genesis file: %w", err)
t.Fatal(fmt.Errorf("failed to read genesis file: %w", err))
}
blockStoreDB := dbm.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)
t.Fatal(fmt.Errorf("failed to make genesis state: %w", err))
}
state.Version.Consensus.App = kvstore.ProtocolVersion
if err = stateStore.Save(state); err != nil {
t.Error(err)
t.Fatal(err)
}
blockStore := store.NewBlockStore(blockStoreDB)
proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics())
proxyLogger := logger.With("module", "proxy")
proxyApp := proxy.New(abciclient.NewLocalClient(logger, app), proxyLogger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
return fmt.Errorf("failed to start proxy app connections: %w", err)
t.Fatal(fmt.Errorf("failed to start proxy app connections: %w", err))
}
t.Cleanup(proxyApp.Wait)
eventBus := eventbus.NewDefault(logger.With("module", "events"))
if err := eventBus.Start(ctx); err != nil {
return fmt.Errorf("failed to start event bus: %w", err)
t.Fatal(fmt.Errorf("failed to start event bus: %w", err))
}
t.Cleanup(func() { eventBus.Stop(); eventBus.Wait() })
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(ctx, logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetEventBus(eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp, mempool, evpool, blockStore, eventBus)
consensusState, err := NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
if err != nil {
t.Fatal(err)
}
if privValidator != nil && privValidator != (*privval.FilePV)(nil) {
consensusState.SetPrivValidator(ctx, privValidator)
}
@ -91,22 +98,24 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
// see wal.go#103
if err := wal.Write(EndHeightMessage{0}); err != nil {
t.Error(err)
t.Fatal(err)
}
consensusState.wal = wal
if err := consensusState.Start(ctx); err != nil {
return fmt.Errorf("failed to start consensus state: %w", err)
t.Fatal(fmt.Errorf("failed to start consensus state: %w", err))
}
t.Cleanup(consensusState.Wait)
defer consensusState.Stop()
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-numBlocksWritten:
consensusState.Stop()
return nil
case <-time.After(1 * time.Minute):
consensusState.Stop()
return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
case <-timer.C:
t.Fatal(fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks))
}
}
@ -115,9 +124,7 @@ func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlo
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(ctx, t, logger, wr, numBlocks); err != nil {
return []byte{}, err
}
WALGenerateNBlocks(ctx, t, logger, wr, numBlocks)
wr.Flush()
return b.Bytes(), nil
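With this change WALGenerateNBlocks reports failures through the testing.T it receives rather than returning an error, so callers simply invoke it (or WALWithNBlocks) and use the result. A short usage sketch under that assumption; the test name is hypothetical:

func TestUsesGeneratedWAL(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	logger := log.NewNopLogger()

	// Drives a stripped-down node until 3 blocks are written to the WAL buffer;
	// any failure along the way fails this test directly.
	walBody, err := WALWithNBlocks(ctx, t, logger, 3)
	require.NoError(t, err)
	require.NotEmpty(t, walBody)
}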


+ 9
- 10
internal/consensus/wal_test.go

@ -3,6 +3,7 @@ package consensus
import (
"bytes"
"context"
"os"
"path/filepath"
"testing"
@ -41,13 +42,12 @@ func TestWALTruncate(t *testing.T) {
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
t.Cleanup(wal.Wait)
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
// 60 blocks are roughly 70K in size, greater than the group's headBuf size (4096 * 10).
// When headBuf is full, the truncated content is flushed to the file; at that
// point RotateFile is called, so truncated content exists in each file.
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60)
require.NoError(t, err)
WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60)
// put the leakcheck here so it runs after other cleanup
// functions.
@ -112,7 +112,7 @@ func TestWALWrite(t *testing.T) {
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
t.Cleanup(wal.Wait)
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
// 1) Write returns an error if msg is too big
msg := &BlockPartMessage{
@ -151,7 +151,6 @@ func TestWALSearchForEndHeight(t *testing.T) {
wal, err := NewWAL(ctx, logger, walFile)
require.NoError(t, err)
t.Cleanup(func() { wal.Stop(); wal.Wait() })
h := int64(3)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
@ -176,24 +175,24 @@ func TestWALPeriodicSync(t *testing.T) {
walDir := t.TempDir()
walFile := filepath.Join(walDir, "wal")
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond))
defer os.RemoveAll(walFile)
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond))
require.NoError(t, err)
wal.SetFlushInterval(walTestFlushInterval)
logger := log.NewNopLogger()
// Generate some data
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5)
require.NoError(t, err)
WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5)
// We should have data in the buffer now
assert.NotZero(t, wal.Group().Buffered())
require.NoError(t, wal.Start(ctx))
t.Cleanup(func() { wal.Stop(); wal.Wait() })
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
time.Sleep(walTestFlushInterval + (10 * time.Millisecond))
time.Sleep(walTestFlushInterval + (20 * time.Millisecond))
// The data should have been flushed by the periodic sync
assert.Zero(t, wal.Group().Buffered())


+ 0
- 32
internal/eventbus/event_bus.go

@ -50,13 +50,6 @@ func (b *EventBus) NumClientSubscriptions(clientID string) int {
return b.pubsub.NumClientSubscriptions(clientID)
}
// Deprecated: Use SubscribeWithArgs instead.
func (b *EventBus) Subscribe(ctx context.Context,
clientID string, query *tmquery.Query, capacities ...int) (Subscription, error) {
return b.pubsub.Subscribe(ctx, clientID, query, capacities...)
}
func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) {
return b.pubsub.SubscribeWithArgs(ctx, args)
}
@ -201,28 +194,3 @@ func (b *EventBus) PublishEventValidatorSetUpdates(ctx context.Context, data typ
func (b *EventBus) PublishEventEvidenceValidated(ctx context.Context, evidence types.EventDataEvidenceValidated) error {
return b.Publish(ctx, types.EventEvidenceValidatedValue, evidence)
}
//-----------------------------------------------------------------------------
// NopEventBus implements a types.BlockEventPublisher that discards all events.
type NopEventBus struct{}
func (NopEventBus) PublishEventNewBlock(context.Context, types.EventDataNewBlock) error {
return nil
}
func (NopEventBus) PublishEventNewBlockHeader(context.Context, types.EventDataNewBlockHeader) error {
return nil
}
func (NopEventBus) PublishEventNewEvidence(context.Context, types.EventDataNewEvidence) error {
return nil
}
func (NopEventBus) PublishEventTx(context.Context, types.EventDataTx) error {
return nil
}
func (NopEventBus) PublishEventValidatorSetUpdates(context.Context, types.EventDataValidatorSetUpdates) error {
return nil
}
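With the deprecated Subscribe method and the NopEventBus shim removed, callers subscribe through SubscribeWithArgs and consume events from the returned subscription, as the evidence tests later in this change do. A brief sketch; only ClientID and sub.Next appear in this diff, so the Query field and its MustCompile constructor are assumptions:

eventBus := eventbus.NewDefault(logger)
if err := eventBus.Start(ctx); err != nil {
	return err
}
sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
	ClientID: "example-client",
	// Assumed: a query field compiled from the usual event-query syntax.
	Query: tmquery.MustCompile(`tm.event='EvidenceValidated'`),
})
if err != nil {
	return err
}
msg, err := sub.Next(ctx) // blocks until the next matching event or ctx ends
if err != nil {
	return err
}
_ = msg.Data()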

+ 2
- 2
internal/eventbus/event_bus_test.go

@ -27,7 +27,7 @@ func TestEventBusPublishEventTx(t *testing.T) {
require.NoError(t, err)
tx := types.Tx("foo")
result := abci.ResponseDeliverTx{
result := abci.ExecTxResult{
Data: []byte("bar"),
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
@ -134,7 +134,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
require.NoError(t, err)
tx := types.Tx("foo")
result := abci.ResponseDeliverTx{
result := abci.ExecTxResult{
Data: []byte("bar"),
Events: []abci.Event{
{


+ 33
- 35
internal/evidence/pool.go

@ -36,14 +36,14 @@ type Pool struct {
evidenceList *clist.CList // concurrent linked-list of evidence
evidenceSize uint32 // amount of pending evidence
// needed to load validators to verify evidence
stateDB sm.Store
// needed to load headers and commits to verify evidence
blockStore BlockStore
stateDB sm.Store
mtx sync.Mutex
// latest state
state sm.State
state sm.State
isStarted bool
// evidence from consensus is buffered to this slice, waiting until the next height
// before being flushed to the pool. This prevents broadcasting and proposing of
// evidence before the height at which the evidence occurred is finished.
@ -60,46 +60,19 @@ type Pool struct {
Metrics *Metrics
}
func (evpool *Pool) SetEventBus(e *eventbus.EventBus) {
evpool.eventBus = e
}
// NewPool creates an evidence pool. If using an existing evidence store,
// it will add all pending evidence to the concurrent list.
func NewPool(logger log.Logger, evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore, metrics *Metrics) (*Pool, error) {
state, err := stateDB.Load()
if err != nil {
return nil, fmt.Errorf("failed to load state: %w", err)
}
pool := &Pool{
stateDB: stateDB,
func NewPool(logger log.Logger, evidenceDB dbm.DB, stateStore sm.Store, blockStore BlockStore, metrics *Metrics, eventBus *eventbus.EventBus) *Pool {
return &Pool{
blockStore: blockStore,
state: state,
stateDB: stateStore,
logger: logger,
evidenceStore: evidenceDB,
evidenceList: clist.New(),
consensusBuffer: make([]duplicateVoteSet, 0),
Metrics: metrics,
eventBus: eventBus,
}
// If pending evidence already in db, in event of prior failure, then check
// for expiration, update the size and load it back to the evidenceList.
pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence()
evList, _, err := pool.listEvidence(prefixPending, -1)
if err != nil {
return nil, err
}
atomic.StoreUint32(&pool.evidenceSize, uint32(len(evList)))
pool.Metrics.NumEvidence.Set(float64(pool.evidenceSize))
for _, ev := range evList {
pool.evidenceList.PushBack(ev)
}
pool.eventBus = nil
return pool, nil
}
// PendingEvidence is used primarily as part of block proposal and returns up to
@ -277,6 +250,31 @@ func (evpool *Pool) State() sm.State {
return evpool.state
}
func (evpool *Pool) Start(state sm.State) error {
if evpool.isStarted {
return errors.New("pool is already running")
}
evpool.state = state
// If pending evidence is already in the db (in the event of a prior failure), check
// it for expiration, update the size, and load it back into the evidenceList.
evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence()
evList, _, err := evpool.listEvidence(prefixPending, -1)
if err != nil {
return err
}
atomic.StoreUint32(&evpool.evidenceSize, uint32(len(evList)))
evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize))
for _, ev := range evList {
evpool.evidenceList.PushBack(ev)
}
return nil
}
func (evpool *Pool) Close() error {
return evpool.evidenceStore.Close()
}
@ -449,6 +447,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide
}
func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
batch := evpool.evidenceStore.NewBatch()
defer batch.Close()
@ -473,7 +472,6 @@ func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
// remove evidence from the clist
evpool.removeEvidenceFromList(blockEvidenceMap)
// update the evidence size
atomic.AddUint32(&evpool.evidenceSize, ^uint32(len(blockEvidenceMap)-1))
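The pool's construction is now split in two: NewPool only wires dependencies (including the event bus), while Start(state) performs the work that used to happen in the constructor, i.e. pruning expired evidence and reloading pending evidence into the list. A condensed sketch of the new two-step setup, matching the startPool helper in the tests below:

eventBus := eventbus.NewDefault(logger)
if err := eventBus.Start(ctx); err != nil {
	return err
}
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)

state, err := stateStore.Load()
if err != nil {
	return err
}
if err := pool.Start(state); err != nil { // reloads pending evidence, prunes expired entries
	return err
}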


+ 53
- 48
internal/evidence/pool_test.go

@ -34,6 +34,18 @@ var (
defaultEvidenceMaxBytes int64 = 1000
)
func startPool(t *testing.T, pool *evidence.Pool, store sm.Store) {
t.Helper()
state, err := store.Load()
if err != nil {
t.Fatalf("cannot load state: %v", err)
}
if err := pool.Start(state); err != nil {
t.Fatalf("cannot start state pool: %v", err)
}
}
func TestEvidencePoolBasic(t *testing.T) {
var (
height = int64(1)
@ -51,9 +63,13 @@ func TestEvidencePoolBasic(t *testing.T) {
stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil)
stateStore.On("Load").Return(createState(height+1, valSet), nil)
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
require.NoError(t, setupEventBus(ctx, pool))
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
// evidence not seen yet:
evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes)
require.Equal(t, 0, len(evs))
@ -115,10 +131,12 @@ func TestAddExpiredEvidence(t *testing.T) {
return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}}
})
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
testCases := []struct {
evHeight int64
@ -159,9 +177,7 @@ func TestReportConflictingVotes(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, pv := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, pv, _ := defaultTestPool(ctx, t, height)
val := types.NewValidator(pv.PrivKey.PubKey(), 10)
@ -201,9 +217,7 @@ func TestEvidencePoolUpdate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
state := pool.State()
@ -273,9 +287,7 @@ func TestVerifyPendingEvidencePasses(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -295,9 +307,7 @@ func TestVerifyDuplicatedEvidenceFails(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -321,7 +331,7 @@ func TestEventOnEvidenceValidated(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
pool, val, eventBus := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -332,11 +342,6 @@ func TestEventOnEvidenceValidated(t *testing.T) {
)
require.NoError(t, err)
eventBus := eventbus.NewDefault(log.TestingLogger())
require.NoError(t, eventBus.Start(ctx))
pool.SetEventBus(eventBus)
const query = `tm.event='EvidenceValidated'`
evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
ClientID: "test",
@ -348,6 +353,9 @@ func TestEventOnEvidenceValidated(t *testing.T) {
go func() {
defer close(done)
msg, err := evSub.Next(ctx)
if ctx.Err() != nil {
return
}
assert.NoError(t, err)
edt := msg.Data().(types.EventDataEvidenceValidated)
@ -394,14 +402,15 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
hash := ev.Hash()
err = pool.AddEvidence(ctx, ev)
err := pool.AddEvidence(ctx, ev)
require.NoError(t, err)
err = pool.AddEvidence(ctx, ev)
require.NoError(t, err)
@ -449,11 +458,13 @@ func TestRecoverPendingEvidence(t *testing.T) {
blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
require.NoError(t, err)
// create previous pool and populate it
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
// create previous pool and populate it
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
goodEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -495,9 +506,8 @@ func TestRecoverPendingEvidence(t *testing.T) {
},
}, nil)
newPool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, newStateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
newPool := evidence.NewPool(logger, evidenceDB, newStateStore, blockStore, evidence.NopMetrics(), nil)
startPool(t, newPool, newStateStore)
evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes)
require.Equal(t, 1, len(evList))
@ -590,7 +600,7 @@ func makeCommit(height int64, valAddr []byte) *types.Commit {
return types.NewCommit(height, 0, types.BlockID{}, commitSigs)
}
func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV) {
func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV, *eventbus.EventBus) {
t.Helper()
val := types.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
@ -601,10 +611,14 @@ func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence
blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
require.NoError(t, err)
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err, "test evidence pool could not be created")
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
return pool, val
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
return pool, val, eventBus
}
func createState(height int64, valSet *types.ValidatorSet) sm.State {
@ -616,12 +630,3 @@ func createState(height int64, valSet *types.ValidatorSet) sm.State {
ConsensusParams: *types.DefaultConsensusParams(),
}
}
func setupEventBus(ctx context.Context, evpool *evidence.Pool) error {
eventBus := eventbus.NewDefault(log.TestingLogger())
if err := eventBus.Start(ctx); err != nil {
return err
}
evpool.SetEventBus(eventBus)
return nil
}

+ 5
- 4
internal/evidence/reactor_test.go

@ -82,13 +82,14 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint
}
return nil
})
rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
err = eventBus.Start(ctx)
require.NoError(t, err)
rts.pools[nodeID].SetEventBus(eventBus)
rts.pools[nodeID] = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics(), eventBus)
startPool(t, rts.pools[nodeID], stateStores[idx])
require.NoError(t, err)
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)


+ 34
- 26
internal/evidence/verify_test.go

@ -12,6 +12,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/evidence"
"github.com/tendermint/tendermint/internal/evidence/mocks"
sm "github.com/tendermint/tendermint/internal/state"
@ -76,6 +77,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
attackTime := defaultEvidenceTime.Add(1 * time.Hour)
// create valid lunatic evidence
@ -96,8 +98,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
evList := types.EvidenceList{ev}
// check that the evidence pool correctly verifies the evidence
@ -111,32 +112,29 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
// if we submit evidence against only a single byzantine validator when we know there are more validators,
// this should return an error
ev.ByzantineValidators = ev.ByzantineValidators[:1]
t.Log(evList)
assert.Error(t, pool.CheckEvidence(ctx, evList))
// restore original byz vals
ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader)
// duplicate evidence should be rejected
evList = types.EvidenceList{ev, ev}
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
assert.Error(t, pool.CheckEvidence(ctx, evList))
// If evidence is submitted with an altered timestamp it should return an error
ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute)
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
err = pool.AddEvidence(ctx, ev)
err := pool.AddEvidence(ctx, ev)
assert.Error(t, err)
ev.Timestamp = defaultEvidenceTime
// Evidence submitted with a different validator power should fail
ev.TotalVotingPower = 1
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
err = pool.AddEvidence(ctx, ev)
assert.Error(t, err)
ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()
@ -154,6 +152,9 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
// create a forward lunatic attack
ev, trusted, common := makeLunaticEvidence(ctx,
t, attackHeight, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime)
@ -179,10 +180,11 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit)
blockStore.On("Height").Return(nodeHeight)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
require.NoError(t, setupEventBus(ctx, pool))
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
// check that the evidence pool correctly verifies the evidence
assert.NoError(t, pool.CheckEvidence(ctx, types.EvidenceList{ev}))
@ -199,8 +201,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
oldBlockStore.On("Height").Return(nodeHeight)
require.Equal(t, defaultEvidenceTime, oldBlockStore.LoadBlockMeta(nodeHeight).Header.Time)
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics(), nil)
assert.Error(t, pool.CheckEvidence(ctx, types.EvidenceList{ev}))
}
@ -208,6 +209,8 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10)
conflictingHeader := factory.MakeHeader(t, &types.Header{
@ -289,10 +292,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
evList := types.EvidenceList{ev}
err = pool.CheckEvidence(ctx, evList)
@ -305,6 +308,9 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
var height int64 = 10
conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10)
@ -378,10 +384,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
evList := types.EvidenceList{ev}
err = pool.CheckEvidence(ctx, evList)
@ -401,6 +407,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
val := types.NewMockPV()
val2 := types.NewMockPV()
valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(ctx, 1)})
@ -478,10 +485,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}})
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
evList := types.EvidenceList{goodEv}
err = pool.CheckEvidence(ctx, evList)


+ 1
- 1
internal/inspect/inspect_test.go

@ -265,7 +265,7 @@ func TestBlockResults(t *testing.T) {
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{
TxResults: []*abcitypes.ExecTxResult{
{
GasUsed: testGasUsed,
},


+ 5
- 5
internal/libs/autofile/autofile.go

@ -41,9 +41,9 @@ const (
autoFilePerms = os.FileMode(0600)
)
// errAutoFileClosed is reported when operations attempt to use an autofile
// ErrAutoFileClosed is reported when operations attempt to use an autofile
// after it has been closed.
var errAutoFileClosed = errors.New("autofile is closed")
var ErrAutoFileClosed = errors.New("autofile is closed")
// AutoFile automatically closes and re-opens the file for writing. The file is
// automatically set up to close itself every 1s and upon receiving SIGHUP.
@ -155,7 +155,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) {
af.mtx.Lock()
defer af.mtx.Unlock()
if af.closed {
return 0, fmt.Errorf("write: %w", errAutoFileClosed)
return 0, fmt.Errorf("write: %w", ErrAutoFileClosed)
}
if af.file == nil {
@ -174,7 +174,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) {
func (af *AutoFile) Sync() error {
return af.withLock(func() error {
if af.closed {
return fmt.Errorf("sync: %w", errAutoFileClosed)
return fmt.Errorf("sync: %w", ErrAutoFileClosed)
} else if af.file == nil {
return nil // nothing to sync
}
@ -207,7 +207,7 @@ func (af *AutoFile) Size() (int64, error) {
af.mtx.Lock()
defer af.mtx.Unlock()
if af.closed {
return 0, fmt.Errorf("size: %w", errAutoFileClosed)
return 0, fmt.Errorf("size: %w", ErrAutoFileClosed)
}
if af.file == nil {


+ 13
- 0
internal/libs/autofile/group.go

@ -274,6 +274,10 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) {
g.mtx.Lock()
defer g.mtx.Unlock()
if err := ctx.Err(); err != nil {
return
}
if g.totalSizeLimit == 0 {
return
}
@ -290,6 +294,11 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) {
g.logger.Error("Group's head may grow without bound", "head", g.Head.Path)
return
}
if ctx.Err() != nil {
return
}
pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex)
fInfo, err := os.Stat(pathToRemove)
if err != nil {
@ -314,6 +323,10 @@ func (g *Group) rotateFile(ctx context.Context) {
g.mtx.Lock()
defer g.mtx.Unlock()
if err := ctx.Err(); err != nil {
return
}
headPath := g.Head.Path
if err := g.headBuf.Flush(); err != nil {


+ 12
- 0
internal/libs/flowrate/flowrate.go

@ -275,3 +275,15 @@ func (m *Monitor) waitNextSample(now time.Duration) time.Duration {
}
return now
}
// CurrentTransferRate returns the current transfer rate, or 0 if the monitor is not active
func (m *Monitor) CurrentTransferRate() int64 {
m.mu.Lock()
defer m.mu.Unlock()
if m.sLast > m.start && m.active {
return round(m.rEMA)
}
return 0
}

+ 1
- 1
internal/libs/queue/queue_test.go

@ -167,7 +167,7 @@ func TestWait(t *testing.T) {
defer close(done)
got, err := q.Wait(ctx)
if err != nil {
t.Errorf("Wait: unexpected error: %w", err)
t.Errorf("Wait: unexpected error: %v", err)
} else if got != input {
t.Errorf("Wait: got %q, want %q", got, input)
}
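Several test files in this change swap %w for %v in t.Errorf and b.Errorf calls. The %w verb is only meaningful to fmt.Errorf, where it wraps the error for errors.Is and errors.As; the testing helpers merely format a message, so %v is the correct verb there. A small illustration of the distinction (illustrative only, not part of this change):

// %w is only understood by fmt.Errorf, where it wraps the error:
wrapped := fmt.Errorf("loading state: %w", io.ErrUnexpectedEOF)
fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true

// t.Errorf and b.Errorf only format a message, so use %v:
// t.Errorf("Wait: unexpected error: %v", err)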


+ 6
- 8
internal/mempool/mempool.go

@ -9,10 +9,10 @@ import (
"sync/atomic"
"time"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
"github.com/tendermint/tendermint/internal/proxy"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/types"
@ -31,7 +31,7 @@ type TxMempool struct {
logger log.Logger
metrics *Metrics
config *config.MempoolConfig
proxyAppConn proxy.AppConnMempool
proxyAppConn abciclient.Client
// txsAvailable fires once for each height when the mempool is not empty
txsAvailable chan struct{}
@ -93,8 +93,7 @@ type TxMempool struct {
func NewTxMempool(
logger log.Logger,
cfg *config.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
proxyAppConn abciclient.Client,
options ...TxMempoolOption,
) *TxMempool {
@ -102,7 +101,7 @@ func NewTxMempool(
logger: logger,
config: cfg,
proxyAppConn: proxyAppConn,
height: height,
height: -1,
cache: NopTxCache{},
metrics: NopMetrics(),
txStore: NewTxStore(),
@ -418,11 +417,10 @@ func (txmp *TxMempool) Update(
ctx context.Context,
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
execTxResult []*abci.ExecTxResult,
newPreFn PreCheckFunc,
newPostFn PostCheckFunc,
) error {
txmp.height = blockHeight
txmp.notifiedTxsAvailable = false
@ -434,7 +432,7 @@ func (txmp *TxMempool) Update(
}
for i, tx := range blockTxs {
if deliverTxResponses[i].Code == abci.CodeTypeOK {
if execTxResult[i].Code == abci.CodeTypeOK {
// add the valid committed transaction to the cache (if missing)
_ = txmp.cache.Push(tx)
} else if !txmp.config.KeepInvalidTxsInCache {
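In summary, the mempool now talks to the application through a bare abciclient.Client, no longer takes an initial-height argument, and consumes []*abci.ExecTxResult in Update. A compact sketch of the new call pattern, with the surrounding variables (logger, ctx, cfg, blockHeight, blockTxs) assumed from the tests below:

conn := abciclient.NewLocalClient(logger, kvstore.NewApplication())
if err := conn.Start(ctx); err != nil {
	return err
}
txmp := NewTxMempool(logger, cfg.Mempool, conn) // the initial-height argument is gone

// After a block commits, report each tx result as an *abci.ExecTxResult.
results := make([]*abci.ExecTxResult, len(blockTxs))
for i := range results {
	results[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} // assume every tx succeeded
}
txmp.Lock()
defer txmp.Unlock()
return txmp.Update(ctx, blockHeight, blockTxs, results, nil, nil)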


+ 19
- 19
internal/mempool/mempool_test.go

@ -78,24 +78,24 @@ func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoo
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
app := &application{kvstore.NewApplication()}
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
conn := abciclient.NewLocalClient(logger, &application{
kvstore.NewApplication(),
})
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
cfg.Mempool.CacheSize = cacheSize
appConnMem, err := cc(logger)
require.NoError(t, err)
require.NoError(t, appConnMem.Start(ctx))
require.NoError(t, conn.Start(ctx))
t.Cleanup(func() {
os.RemoveAll(cfg.RootDir)
cancel()
appConnMem.Wait()
conn.Wait()
})
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, conn, options...)
}
func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@ -172,9 +172,9 @@ func TestTxMempool_TxsAvailable(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
// commit half the transactions and ensure we fire an event
@ -204,9 +204,9 @@ func TestTxMempool_Size(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -231,9 +231,9 @@ func TestTxMempool_Flush(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -446,7 +446,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
for range ticker.C {
reapedTxs := txmp.ReapMaxTxs(200)
if len(reapedTxs) > 0 {
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses := make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
var code uint32
@ -456,7 +456,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
code = abci.CodeTypeOK
}
responses[i] = &abci.ResponseDeliverTx{Code: code}
responses[i] = &abci.ExecTxResult{Code: code}
}
txmp.Lock()
@ -494,9 +494,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses := make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -520,9 +520,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
// removed. However, we do know that at most 95 txs can be expired and
// removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses = make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()


+ 1
- 1
internal/mempool/mock/mempool.go

@ -27,7 +27,7 @@ func (Mempool) Update(
_ context.Context,
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ []*abci.ExecTxResult,
_ mempool.PreCheckFunc,
_ mempool.PostCheckFunc,
) error {


+ 3
- 3
internal/mempool/reactor_test.go

@ -242,9 +242,9 @@ func TestReactorConcurrency(t *testing.T) {
mempool.Lock()
defer mempool.Unlock()
deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
deliverTxResponses := make([]*abci.ExecTxResult, len(txs))
for i := range txs {
deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
deliverTxResponses[i] = &abci.ExecTxResult{Code: 0}
}
require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil))
@ -261,7 +261,7 @@ func TestReactorConcurrency(t *testing.T) {
mempool.Lock()
defer mempool.Unlock()
err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil)
require.NoError(t, err)
}()
}


+ 1
- 1
internal/mempool/types.go

@ -66,7 +66,7 @@ type Mempool interface {
ctx context.Context,
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
txResults []*abci.ExecTxResult,
newPreFn PreCheckFunc,
newPostFn PostCheckFunc,
) error


+ 2
- 2
internal/p2p/conn/connection.go

@ -413,7 +413,7 @@ func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool {
// Block until .sendMonitor says we can write.
// Once we're ready we send more than we asked for,
// but amortized it should even out.
c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true)
// Now send some PacketMsgs.
for i := 0; i < numBatchPacketMsgs; i++ {
@ -481,7 +481,7 @@ FOR_LOOP:
}
// Block until .recvMonitor says we can read.
c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
c.recvMonitor.Limit(c._maxPacketMsgSize, c.config.RecvRate, true)
// Peek into bufConnReader for debugging
/*


+ 8
- 8
internal/p2p/conn/secret_connection_test.go

@ -126,7 +126,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
nodePrvKey := ed25519.GenPrivKey()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("failed to establish SecretConnection for node: %w", err)
t.Errorf("failed to establish SecretConnection for node: %v", err)
return nil, true, err
}
// In parallel, handle some reads and writes.
@ -136,7 +136,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
t.Errorf("failed to write to nodeSecretConn: %w", err)
t.Errorf("failed to write to nodeSecretConn: %v", err)
return nil, true, err
}
if n != len(nodeWrite) {
@ -163,7 +163,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
}
return nil, false, nil
} else if err != nil {
t.Errorf("failed to read from nodeSecretConn: %w", err)
t.Errorf("failed to read from nodeSecretConn: %v", err)
return nil, true, err
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
@ -288,7 +288,7 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i
for i := 0; i < n; i++ {
_, err := conn.Write([]byte(txt))
if err != nil {
t.Errorf("failed to write to fooSecConn: %w", err)
t.Errorf("failed to write to fooSecConn: %v", err)
return
}
}
@ -343,7 +343,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil {
tb.Errorf("failed to establish SecretConnection for foo: %w", err)
tb.Errorf("failed to establish SecretConnection for foo: %v", err)
return nil, true, err
}
remotePubBytes := fooSecConn.RemotePubKey()
@ -358,7 +358,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil {
tb.Errorf("failed to establish SecretConnection for bar: %w", err)
tb.Errorf("failed to establish SecretConnection for bar: %v", err)
return nil, true, err
}
remotePubBytes := barSecConn.RemotePubKey()
@ -405,7 +405,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
if err == io.EOF {
return
} else if err != nil {
b.Errorf("failed to read from barSecConn: %w", err)
b.Errorf("failed to read from barSecConn: %v", err)
return
}
}
@ -416,7 +416,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
idx := mrand.Intn(len(fooWriteBytes))
_, err := fooSecConn.Write(fooWriteBytes[idx])
if err != nil {
b.Errorf("failed to write to fooSecConn: %w", err)
b.Errorf("failed to write to fooSecConn: %v", err)
return
}
}
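For context on why these verbs change: %w is only meaningful to fmt.Errorf, which uses it to wrap the underlying error; the testing package's Errorf formats with the ordinary fmt rules, so %v is the correct verb there. A small standalone illustration, not part of the diff:

package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("boom")

	// fmt.Errorf understands %w and records base as the wrapped error.
	wrapped := fmt.Errorf("context: %w", base)
	fmt.Println(errors.Is(wrapped, base)) // true

	// Plain formatting, which is what t.Errorf/b.Errorf do with their format
	// string, treats %w as an unknown verb and emits a %!w(...) placeholder.
	fmt.Printf("context: %w\n", base)
	fmt.Printf("context: %v\n", base) // context: boom
}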


+ 3
- 4
internal/p2p/p2ptest/network.go View File

@ -101,10 +101,8 @@ func (n *Network) Start(ctx context.Context, t *testing.T) {
case <-ctx.Done():
require.Fail(t, "operation canceled")
case peerUpdate := <-sourceSub.Updates():
require.Equal(t, p2p.PeerUpdate{
NodeID: targetNode.NodeID,
Status: p2p.PeerStatusUp,
}, peerUpdate)
require.Equal(t, targetNode.NodeID, peerUpdate.NodeID)
require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
case <-time.After(3 * time.Second):
require.Fail(t, "timed out waiting for peer", "%v dialing %v",
sourceNode.NodeID, targetNode.NodeID)
@ -114,6 +112,7 @@ func (n *Network) Start(ctx context.Context, t *testing.T) {
case <-ctx.Done():
require.Fail(t, "operation canceled")
case peerUpdate := <-targetSub.Updates():
peerUpdate.Channels = nil
require.Equal(t, p2p.PeerUpdate{
NodeID: sourceNode.NodeID,
Status: p2p.PeerStatusUp,


+ 7
- 3
internal/p2p/p2ptest/require.go View File

@ -136,8 +136,8 @@ func RequireUpdate(t *testing.T, peerUpdates *p2p.PeerUpdates, expect p2p.PeerUp
select {
case update := <-peerUpdates.Updates():
require.Equal(t, expect, update, "peer update did not match")
require.Equal(t, expect.NodeID, update.NodeID, "node id did not match")
require.Equal(t, expect.Status, update.Status, "statuses did not match")
case <-timer.C:
require.Fail(t, "timed out waiting for peer update", "expected %v", expect)
}
@ -155,7 +155,11 @@ func RequireUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates, expect []p2p.Pee
case update := <-peerUpdates.Updates():
actual = append(actual, update)
if len(actual) == len(expect) {
require.Equal(t, expect, actual)
for idx := range expect {
require.Equal(t, expect[idx].NodeID, actual[idx].NodeID)
require.Equal(t, expect[idx].Status, actual[idx].Status)
}
return
}


+ 16
- 9
internal/p2p/peermanager.go View File

@ -47,8 +47,9 @@ const (
// PeerUpdate is a peer update event sent via PeerUpdates.
type PeerUpdate struct {
NodeID types.NodeID
Status PeerStatus
NodeID types.NodeID
Status PeerStatus
Channels ChannelIDSet
}
// PeerUpdates is a peer update subscription with notifications about peer
@ -674,19 +675,23 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
return nil
}
// Ready marks a peer as ready, broadcasting status updates to subscribers. The
// peer must already be marked as connected. This is separate from Dialed() and
// Accepted() to allow the router to set up its internal queues before reactors
// start sending messages.
func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID) {
// Ready marks a peer as ready, broadcasting status updates to
// subscribers. The peer must already be marked as connected. This is
// separate from Dialed() and Accepted() to allow the router to set up
// its internal queues before reactors start sending messages. The
// channels set here are passed in the peer update broadcast to
// reactors, which can then mediate their own behavior based on the
// capability of the peers.
func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID, channels ChannelIDSet) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.connected[peerID] {
m.ready[peerID] = true
m.broadcast(ctx, PeerUpdate{
NodeID: peerID,
Status: PeerStatusUp,
NodeID: peerID,
Status: PeerStatusUp,
Channels: channels,
})
}
}
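A reactor that subscribes to peer updates can now consult the broadcast channel set before talking to a peer. A minimal sketch, assuming code inside this repository; watchPeers and myChannelID are invented names for illustration:

import (
	"context"

	"github.com/tendermint/tendermint/internal/p2p"
)

// watchPeers is a hypothetical reactor helper: only start sending to peers
// that actually advertise the reactor's channel.
func watchPeers(ctx context.Context, peerUpdates *p2p.PeerUpdates, myChannelID p2p.ChannelID) {
	for {
		select {
		case <-ctx.Done():
			return
		case update := <-peerUpdates.Updates():
			if update.Status == p2p.PeerStatusUp && update.Channels.Contains(myChannelID) {
				// The peer speaks our channel; it is safe to begin
				// sending envelopes to it.
			}
		}
	}
}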
@ -1208,6 +1213,7 @@ type peerInfo struct {
// These fields are ephemeral, i.e. not persisted to the database.
Persistent bool
Seed bool
Height int64
FixedScore PeerScore // mainly for tests
@ -1230,6 +1236,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
return nil, err
}
p.AddressInfo[addressInfo.Address] = addressInfo
}
return p, p.Validate()
}


+ 37
- 13
internal/p2p/peermanager_test.go View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@ -1311,7 +1312,7 @@ func TestPeerManager_Ready(t *testing.T) {
require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID))
// Marking a as ready should transition it to PeerStatusUp and send an update.
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID))
require.Equal(t, p2p.PeerUpdate{
NodeID: a.NodeID,
@ -1323,11 +1324,34 @@ func TestPeerManager_Ready(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID))
peerManager.Ready(ctx, b.NodeID)
peerManager.Ready(ctx, b.NodeID, nil)
require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID))
require.Empty(t, sub.Updates())
}
func TestPeerManager_Ready_Channels(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pm, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
sub := pm.Subscribe(ctx)
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
added, err := pm.Add(a)
require.NoError(t, err)
require.True(t, added)
require.NoError(t, pm.Accepted(a.NodeID))
pm.Ready(ctx, a.NodeID, p2p.ChannelIDSet{42: struct{}{}})
require.NotEmpty(t, sub.Updates())
update := <-sub.Updates()
assert.Equal(t, a.NodeID, update.NodeID)
require.True(t, update.Channels.Contains(42))
require.False(t, update.Channels.Contains(48))
}
// See TryEvictNext for most tests, this just tests blocking behavior.
func TestPeerManager_EvictNext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
@ -1342,7 +1366,7 @@ func TestPeerManager_EvictNext(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
// Since there are no peers to evict, EvictNext should block until timeout.
timeoutCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
@ -1378,7 +1402,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
// Spawn a goroutine to error a peer after a delay.
go func() {
@ -1413,7 +1437,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
// Spawn a goroutine to upgrade to b with a delay.
go func() {
@ -1454,7 +1478,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
// Spawn a goroutine to upgrade b with a delay.
go func() {
@ -1489,7 +1513,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) {
// Connecting to a won't evict anything either.
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
// But if a errors it should be evicted.
peerManager.Errored(a.NodeID, errors.New("foo"))
@ -1536,7 +1560,7 @@ func TestPeerManager_Disconnected(t *testing.T) {
_, err = peerManager.Add(a)
require.NoError(t, err)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID))
require.NotEmpty(t, sub.Updates())
require.Equal(t, p2p.PeerUpdate{
@ -1591,7 +1615,7 @@ func TestPeerManager_Errored(t *testing.T) {
require.Zero(t, evict)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
evict, err = peerManager.TryEvictNext()
require.NoError(t, err)
require.Zero(t, evict)
@ -1624,7 +1648,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
require.NoError(t, peerManager.Accepted(a.NodeID))
require.Empty(t, sub.Updates())
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
require.NotEmpty(t, sub.Updates())
require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates())
@ -1641,7 +1665,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
require.NoError(t, peerManager.Dialed(a))
require.Empty(t, sub.Updates())
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
require.NotEmpty(t, sub.Updates())
require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates())
@ -1683,7 +1707,7 @@ func TestPeerManager_Subscribe_Close(t *testing.T) {
require.NoError(t, peerManager.Accepted(a.NodeID))
require.Empty(t, sub.Updates())
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
require.NotEmpty(t, sub.Updates())
require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates())
@ -1716,7 +1740,7 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) {
require.NoError(t, err)
require.True(t, added)
require.NoError(t, peerManager.Accepted(a.NodeID))
peerManager.Ready(ctx, a.NodeID)
peerManager.Ready(ctx, a.NodeID, nil)
expectUp := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}
require.NotEmpty(t, s1)


+ 93
- 134
internal/p2p/pex/reactor.go View File

@ -3,14 +3,12 @@ package pex
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/libs/service"
protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
@ -42,7 +40,7 @@ const (
minReceiveRequestInterval = 100 * time.Millisecond
// the maximum amount of addresses that can be included in a response
maxAddresses uint16 = 100
maxAddresses = 100
// How long to wait when there are no peers available before trying again
noAvailablePeersWaitPeriod = 1 * time.Second
@ -100,15 +98,8 @@ type Reactor struct {
// minReceiveRequestInterval).
lastReceivedRequests map[types.NodeID]time.Time
// keep track of how many new peers to existing peers we have received to
// extrapolate the size of the network
newPeers uint32
totalPeers uint32
// discoveryRatio is the inverse ratio of new peers to old peers squared.
// This is multiplied by the minimum duration to calculate how long to wait
// between each request.
discoveryRatio float32
// the total number of unique peers added
totalPeers int
}
// NewReactor returns a reference to a new reactor.
@ -156,16 +147,6 @@ func (r *Reactor) OnStop() {}
// processPexCh implements a blocking event loop where we listen for p2p
// Envelope messages from the pexCh.
func (r *Reactor) processPexCh(ctx context.Context) {
timer := time.NewTimer(0)
defer timer.Stop()
r.mtx.Lock()
var (
duration = r.calculateNextRequestTime()
err error
)
r.mtx.Unlock()
incoming := make(chan *p2p.Envelope)
go func() {
defer close(incoming)
@ -179,36 +160,51 @@ func (r *Reactor) processPexCh(ctx context.Context) {
}
}()
// Initially, we will request peers quickly to bootstrap. This duration
// will be adjusted upward as knowledge of the network grows.
var nextPeerRequest = minReceiveRequestInterval
timer := time.NewTimer(0)
defer timer.Stop()
for {
timer.Reset(duration)
timer.Reset(nextPeerRequest)
select {
case <-ctx.Done():
return
// outbound requests for new peers
case <-timer.C:
duration, err = r.sendRequestForPeers(ctx)
if err != nil {
// Send a request for more peer addresses.
if err := r.sendRequestForPeers(ctx); err != nil {
return
// TODO(creachadair): Do we really want to stop processing the PEX
// channel just because of an error here?
}
// inbound requests for new peers or responses to requests sent by this
// reactor
// Note we do not update the poll timer upon making a request, only
// when we receive an update that updates our priors.
case envelope, ok := <-incoming:
if !ok {
return
return // channel closed
}
duration, err = r.handleMessage(ctx, r.pexCh.ID, envelope)
// A request from another peer, or a response to one of our requests.
dur, err := r.handlePexMessage(ctx, envelope)
if err != nil {
r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err)
r.logger.Error("failed to process message",
"ch_id", r.pexCh.ID, "envelope", envelope, "err", err)
if serr := r.pexCh.SendError(ctx, p2p.PeerError{
NodeID: envelope.From,
Err: err,
}); serr != nil {
return
}
} else if dur != 0 {
// We got a useful result; update the poll timer.
nextPeerRequest = dur
}
}
}
}
@ -228,19 +224,20 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) {
}
// handlePexMessage handles envelopes sent from peers on the PexChannel.
// If an update was received, a new polling interval is returned; otherwise the
// duration is 0.
func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) (time.Duration, error) {
logger := r.logger.With("peer", envelope.From)
switch msg := envelope.Message.(type) {
case *protop2p.PexRequest:
// check if the peer hasn't sent a prior request too close to this one
// in time
// Verify that this peer hasn't sent us another request too recently.
if err := r.markPeerRequest(envelope.From); err != nil {
return time.Minute, err
return 0, err
}
// request peers from the peer manager and parse the NodeAddresses into
// URL strings
// Fetch peers from the peer manager, convert NodeAddresses into URL
// strings, and send them back to the caller.
nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses)
pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses))
for idx, addr := range nodeAddresses {
@ -248,28 +245,24 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope)
URL: addr.String(),
}
}
if err := r.pexCh.Send(ctx, p2p.Envelope{
return 0, r.pexCh.Send(ctx, p2p.Envelope{
To: envelope.From,
Message: &protop2p.PexResponse{Addresses: pexAddresses},
}); err != nil {
return 0, err
}
})
return time.Second, nil
case *protop2p.PexResponse:
// check if the response matches a request that was made to that peer
// Verify that this response corresponds to one of our pending requests.
if err := r.markPeerResponse(envelope.From); err != nil {
return time.Minute, err
return 0, err
}
// check the size of the response
if len(msg.Addresses) > int(maxAddresses) {
return 10 * time.Minute, fmt.Errorf("peer sent too many addresses (max: %d, got: %d)",
maxAddresses,
len(msg.Addresses),
)
// Verify that the response does not exceed the safety limit.
if len(msg.Addresses) > maxAddresses {
return 0, fmt.Errorf("peer sent too many addresses (%d > maxiumum %d)",
len(msg.Addresses), maxAddresses)
}
var numAdded int
for _, pexAddress := range msg.Addresses {
peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL)
if err != nil {
@ -278,45 +271,19 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope)
added, err := r.peerManager.Add(peerAddress)
if err != nil {
logger.Error("failed to add PEX address", "address", peerAddress, "err", err)
continue
}
if added {
r.newPeers++
numAdded++
logger.Debug("added PEX address", "address", peerAddress)
}
r.totalPeers++
}
return 10 * time.Minute, nil
default:
return time.Second, fmt.Errorf("received unknown message: %T", msg)
}
}
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (duration time.Duration, err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()
r.logger.Debug("received PEX message", "peer", envelope.From)
return r.calculateNextRequestTime(numAdded), nil
switch chID {
case p2p.ChannelID(PexChannel):
duration, err = r.handlePexMessage(ctx, envelope)
default:
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
return 0, fmt.Errorf("received unknown message: %T", msg)
}
return
}
// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we
@ -338,95 +305,87 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
}
}
// sendRequestForPeers pops the first peerID off the list and sends the
// peer a request for more peer addresses. The function then moves the
// peer into the requestsSent bucket and calculates when the next request
// time should be
func (r *Reactor) sendRequestForPeers(ctx context.Context) (time.Duration, error) {
// sendRequestForPeers chooses a peer from the set of available peers and sends
// that peer a request for more peer addresses. The chosen peer is moved into
// the requestsSent bucket so that we will not attempt to contact them again
// until they've replied or updated.
func (r *Reactor) sendRequestForPeers(ctx context.Context) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if len(r.availablePeers) == 0 {
// no peers are available
r.logger.Debug("no available peers to send request to, waiting...")
return noAvailablePeersWaitPeriod, nil
r.logger.Debug("no available peers to send a PEX request to (retrying)")
return nil
}
var peerID types.NodeID
// use range to get a random peer.
// Select an arbitrary peer from the available set.
var peerID types.NodeID
for peerID = range r.availablePeers {
break
}
// send out the pex request
if err := r.pexCh.Send(ctx, p2p.Envelope{
To: peerID,
Message: &protop2p.PexRequest{},
}); err != nil {
return 0, err
return err
}
// remove the peer from the abvailable peers list and mark it in the requestsSent map
// Move the peer from available to pending.
delete(r.availablePeers, peerID)
r.requestsSent[peerID] = struct{}{}
dur := r.calculateNextRequestTime()
r.logger.Debug("peer request sent", "next_request_time", dur)
return dur, nil
return nil
}
// calculateNextRequestTime implements something of a proportional controller
// to estimate how often the reactor should be requesting new peer addresses.
// The dependent variable in this calculation is the ratio of new peers to
// all peers that the reactor receives. The interval is thus calculated as the
// inverse squared. In the beginning, all peers should be new peers.
// We expect this ratio to be near 1 and thus the interval to be as short
// as possible. As the node becomes more familiar with the network the ratio of
// new nodes will plummet to a very small number, meaning the interval expands
// to its upper bound.
// calculateNextRequestTime selects how long we should wait before attempting
// to send out another request for peer addresses.
//
// This implements a simplified proportional control mechanism to poll more
// often when our knowledge of the network is incomplete, and less often as our
// knowledge grows. To estimate our knowledge of the network, we use the
// fraction of "new" peers (addresses we have not previously seen) to the total
// so far observed. When we first join the network, this fraction will be close
// to 1, meaning most new peers are "new" to us, and as we discover more peers,
// the fraction will go toward zero.
//
// CONTRACT: The caller must hold r.mtx exclusively when calling this method.
func (r *Reactor) calculateNextRequestTime() time.Duration {
// check if the peer store is full. If so then there is no need
// to send peer requests too often
// The minimum interval will be minReceiveRequestInterval to ensure we will not
// request from any peer more often than we would allow them to do from us.
func (r *Reactor) calculateNextRequestTime(added int) time.Duration {
r.mtx.Lock()
defer r.mtx.Unlock()
r.totalPeers += added
// If the peer store is nearly full, wait the maximum interval.
if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
r.logger.Debug("peer manager near full ratio, sleeping...",
r.logger.Debug("Peer manager is nearly full",
"sleep_period", fullCapacityInterval, "ratio", ratio)
return fullCapacityInterval
}
// baseTime represents the shortest interval that we can send peer requests
// in. For example if we have 10 peers and we can't send a message to the
// same peer every 500ms, then we can send a request every 50ms. In practice
// we use a safety margin of 2, ergo 100ms
peers := tmmath.MinInt(len(r.availablePeers), 50)
baseTime := minReceiveRequestInterval
if peers > 0 {
baseTime = minReceiveRequestInterval * 2 / time.Duration(peers)
// If there are no available peers to query, poll less aggressively.
if len(r.availablePeers) == 0 {
r.logger.Debug("No available peers to send a PEX request",
"sleep_period", noAvailablePeersWaitPeriod)
return noAvailablePeersWaitPeriod
}
if r.totalPeers > 0 || r.discoveryRatio == 0 {
// find the ratio of new peers. NOTE: We add 1 to both sides to avoid
// divide by zero problems
ratio := float32(r.totalPeers+1) / float32(r.newPeers+1)
// square the ratio in order to get non linear time intervals
// NOTE: The longest possible interval for a network with 100 or more peers
// where a node is connected to 50 of them is 2 minutes.
r.discoveryRatio = ratio * ratio
r.newPeers = 0
r.totalPeers = 0
}
// NOTE: As ratio is always >= 1, discovery ratio is >= 1. Therefore we don't need to worry
// about the next request time being less than the minimum time
return baseTime * time.Duration(r.discoveryRatio)
// Reaching here, there are available peers to query and the peer store
// still has space. Estimate our knowledge of the network from the latest
// update and choose a new interval.
base := float64(minReceiveRequestInterval) / float64(len(r.availablePeers))
multiplier := float64(r.totalPeers+1) / float64(added+1) // +1 to avert zero division
return time.Duration(base*multiplier*multiplier) + minReceiveRequestInterval
}
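As a rough worked example of the schedule this produces (all numbers illustrative): with the 100ms minReceiveRequestInterval, 10 available peers, 30 peers observed so far, and 2 newly added from the latest response, the base is 10ms and the multiplier is 31/3 ≈ 10.3, so the next request fires after roughly 10ms × 10.3² + 100ms ≈ 1.17s. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"time"
)

// Mirrors the reactor constant.
const minReceiveRequestInterval = 100 * time.Millisecond

// nextRequestInterval reproduces the non-empty-peer path of
// calculateNextRequestTime, for experimentation outside the reactor.
func nextRequestInterval(availablePeers, totalPeers, added int) time.Duration {
	base := float64(minReceiveRequestInterval) / float64(availablePeers)
	multiplier := float64(totalPeers+1) / float64(added+1) // +1 averts division by zero
	return time.Duration(base*multiplier*multiplier) + minReceiveRequestInterval
}

func main() {
	fmt.Println(nextRequestInterval(10, 30, 2)) // ≈ 1.17s
	fmt.Println(nextRequestInterval(10, 30, 0)) // added=0: back off much further
}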
func (r *Reactor) markPeerRequest(peer types.NodeID) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
if time.Now().Before(lastRequestTime.Add(minReceiveRequestInterval)) {
return fmt.Errorf("peer sent a request too close after a prior one. Minimum interval: %v",
minReceiveRequestInterval)
if d := time.Since(lastRequestTime); d < minReceiveRequestInterval {
return fmt.Errorf("peer %v sent PEX request too soon (%v < minimum %v)",
peer, d, minReceiveRequestInterval)
}
}
r.lastReceivedRequests[peer] = time.Now()


+ 9
- 12
internal/p2p/pex/reactor_test.go View File

@ -1,6 +1,4 @@
// Temporarily disabled pending https://github.com/tendermint/tendermint/issues/7626.
//go:build issue7626
//nolint:unused
package pex_test
import (
@ -98,11 +96,12 @@ func TestReactorSendsRequestsTooOften(t *testing.T) {
peerErr := <-r.pexErrCh
require.Error(t, peerErr.Err)
require.Empty(t, r.pexOutCh)
require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
require.Contains(t, peerErr.Err.Error(), "sent PEX request too soon")
require.Equal(t, badNode, peerErr.NodeID)
}
func TestReactorSendsResponseWithoutRequest(t *testing.T) {
t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -124,6 +123,7 @@ func TestReactorSendsResponseWithoutRequest(t *testing.T) {
}
func TestReactorNeverSendsTooManyPeers(t *testing.T) {
t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -235,6 +235,7 @@ func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
}
func TestReactorWithNetworkGrowth(t *testing.T) {
t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -686,20 +687,16 @@ func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourc
select {
case peerUpdate := <-targetSub.Updates():
require.Equal(t, p2p.PeerUpdate{
NodeID: node1,
Status: p2p.PeerStatusUp,
}, peerUpdate)
require.Equal(t, peerUpdate.NodeID, node1)
require.Equal(t, peerUpdate.Status, p2p.PeerStatusUp)
case <-time.After(2 * time.Second):
require.Fail(t, "timed out waiting for peer", "%v accepting %v",
targetNode, sourceNode)
}
select {
case peerUpdate := <-sourceSub.Updates():
require.Equal(t, p2p.PeerUpdate{
NodeID: node2,
Status: p2p.PeerStatusUp,
}, peerUpdate)
require.Equal(t, peerUpdate.NodeID, node2)
require.Equal(t, peerUpdate.Status, p2p.PeerStatusUp)
case <-time.After(2 * time.Second):
require.Fail(t, "timed out waiting for peer", "%v dialing %v",
sourceNode, targetNode)


Some files were not shown because too many files changed in this diff
