diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index db157ad37..8bd44fe8a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | @@ -44,7 +44,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | @@ -66,7 +66,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bf79568c9..8f88e9d49 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,7 +13,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - name: Prepare id: prep run: | @@ -43,7 +43,7 @@ jobs: - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v1.13.0 + uses: docker/login-action@v1.14.1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 1e5f6c36c..d80415340 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -19,7 +19,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 38cb3a9d4..7febc491b 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 with: ref: 'v0.34.x' diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index 425108169..1914f9e57 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 with: ref: 'v0.35.x' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index bc4feae45..19cf5cce8 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -23,7 +23,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 71aec16f7..b5d8df0af 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index e12ee2321..569442362 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -17,7 +17,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - name: Install go-fuzz working-directory: test/fuzz diff --git 
a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 60b49443d..8ffb29e8c 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.4.0 + uses: actions/checkout@v3 with: repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index af446771a..89eabc77e 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 40b00eba1..e22dde8fc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,17 +13,20 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v2 + with: + go-version: '^1.17' - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | **/**.go go.mod go.sum - - uses: golangci/golangci-lint-action@v2.5.2 + - uses: golangci/golangci-lint-action@v3.1.0 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.42.1 + version: v1.44 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index d430485ab..badae8c1f 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.4.0 + uses: actions/checkout@v3 - name: Lint Code Base uses: docker://github/super-linter:v4 env: diff --git a/.github/workflows/markdown-links.yml b/.github/workflows/markdown-links.yml index 3e3341d16..a03dd9b72 100644 --- a/.github/workflows/markdown-links.yml +++ b/.github/workflows/markdown-links.yml @@ -1,17 +1,19 @@ -name: Check Markdown links +# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands. -on: - push: - branches: - - master - pull_request: - branches: [master] - -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 - with: - check-modified-files-only: 'yes' +#name: Check Markdown links +# +#on: +# push: +# branches: +# - master +# pull_request: +# branches: [master] +# +#jobs: +# markdown-link-check: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v3 +# - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13 +# with: +# check-modified-files-only: 'yes' diff --git a/.github/workflows/proto-check.yml b/.github/workflows/proto-check.yml deleted file mode 100644 index 306e62903..000000000 --- a/.github/workflows/proto-check.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: Proto Check -# Protobuf runs buf (https://buf.build/) lint and check-breakage -# This workflow is only run when a file in the proto directory -# has been modified. 
-on: - workflow_dispatch: # allow running workflow manually - pull_request: - paths: - - "proto/*" -jobs: - proto-lint: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.4.0 - - name: lint - run: make proto-lint - proto-breakage: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.4.0 - - name: check-breakage - run: make proto-check-breaking-ci diff --git a/.github/workflows/proto-dockerfile.yml b/.github/workflows/proto-dockerfile.yml deleted file mode 100644 index 0d08758b6..000000000 --- a/.github/workflows/proto-dockerfile.yml +++ /dev/null @@ -1,64 +0,0 @@ -# This workflow (re)builds and pushes a Docker image containing the -# protobuf build tools used by the other workflows. -# -# When making changes that require updates to the builder image, you -# should merge the updates first and wait for this workflow to complete, -# so that the changes will be available for the dependent workflows. -# - -name: Build & Push Proto Builder Image -on: - pull_request: - paths: - - "proto/*" - push: - branches: - - master - paths: - - "proto/*" - schedule: - # run this job once a month to recieve any go or buf updates - - cron: "0 9 1 * *" - -env: - REGISTRY: ghcr.io - IMAGE_NAME: tendermint/docker-build-proto - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.4.0 - - name: Check out and assign tags - id: prep - run: | - DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}" - VERSION=noop - if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then - VERSION="${GITHUB_REF#refs/tags/}" - elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then - VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')" - if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then - VERSION=latest - fi - fi - TAGS="${DOCKER_IMAGE}:${VERSION}" - echo ::set-output name=tags::"${TAGS}" - - - name: Set up docker buildx - uses: docker/setup-buildx-action@v1.6.0 - - - name: Log in to the container registry - uses: docker/login-action@v1.13.0 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and publish image - uses: docker/build-push-action@v2.9.0 - with: - context: ./proto - file: ./proto/Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml new file mode 100644 index 000000000..6e7016b40 --- /dev/null +++ b/.github/workflows/proto-lint.yml @@ -0,0 +1,21 @@ +name: Protobuf Lint +on: + pull_request: + paths: + - 'proto/**' + push: + branches: + - master + paths: + - 'proto/**' + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v3 + - uses: bufbuild/buf-setup-action@v1.1.0 + - uses: bufbuild/buf-lint-action@v1 + with: + input: 'proto' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3d65b289b..d3a6a8a71 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.4.0 + uses: actions/checkout@v3 with: fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f3f5cba1d..b0736dabe 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: 
technote-space/get-diff-action@v6.0.1 with: PATTERNS: | @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a2696f79..0216a533b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,27 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). +## v0.35.2 + +February 28, 2022 + +Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123 + +### IMPROVEMENTS + +- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield) + +### BUG FIXES + +- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield) +- [cli] [\#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang) +- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (@waelsy123) +- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification. (@ashcherbakov) +- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height. (@ashcherbakov) +- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish) +- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair) +- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters) + ## v0.35.1 January 26, 2022 diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index e484039d0..e7aa904e3 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -19,6 +19,7 @@ Special thanks to external contributors on this release: - [rpc] \#7713 Remove unused options for websocket clients. (@creachadair) - [config] \#7930 Add new event subscription options and defaults. (@creachadair) - [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair) + - [cli] \#8081 make the reset command safe to use. (@marbar3778) - Apps diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e4613f84e..bfa56bea6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,11 +105,33 @@ specify exactly the dependency you want to update, eg. ## Protobuf -We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. +We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along +with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use +across Tendermint Core. -For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. +To generate proto stubs, lint, and check protos for breaking changes, you will +need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root +of the repository, run: -We use [Docker](https://www.docker.com/) to generate the protobuf stubs.
To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`. +```bash +# Lint all of the .proto files in proto/tendermint +make proto-lint + +# Check if any of your local changes (prior to committing to the Git repository) +# are breaking +make proto-check-breaking + +# Generate Go code from the .proto files in proto/tendermint +make proto-gen +``` + +To automatically format `.proto` files, you will need +[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once +installed, you can run: + +```bash +make proto-format +``` ### Visual Studio Code diff --git a/Makefile b/Makefile index 1a3a1b1dd..13d6ada56 100644 --- a/Makefile +++ b/Makefile @@ -13,8 +13,6 @@ endif LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" -BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto -DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE) CGO_ENABLED ?= 0 # handle nostrip @@ -73,41 +71,57 @@ install: $(BUILDDIR)/: mkdir -p $@ -# The Docker image containing the generator, formatter, and linter. -# This is generated by proto/Dockerfile. To update tools, make changes -# there and run the Build & Push Proto Builder Image workflow. -IMAGE := ghcr.io/tendermint/docker-build-proto:latest -DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE) -HTTPS_GIT := https://github.com/tendermint/tendermint.git ############################################################################### ### Protobuf ### ############################################################################### -proto-all: proto-lint proto-check-breaking -.PHONY: proto-all +check-proto-deps: +ifeq (,$(shell which buf)) + $(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.") +endif +ifeq (,$(shell which protoc-gen-gogofaster)) + $(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install") +endif +.PHONY: check-proto-deps -proto-gen: +check-proto-format-deps: +ifeq (,$(shell which clang-format)) + $(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.") +endif +.PHONY: check-proto-format-deps + +proto-gen: check-proto-deps @echo "Generating Protobuf files" - @$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml + @buf generate + @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ .PHONY: proto-gen -proto-lint: - @$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml +# These targets are provided for convenience and are intended for local +# execution only. +proto-lint: check-proto-deps + @echo "Linting Protobuf files" + @buf lint .PHONY: proto-lint -proto-format: +proto-format: check-proto-format-deps @echo "Formatting Protobuf files" - @$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \; + @find . 
-name '*.proto' -path "./proto/*" -exec clang-format -i {} \; .PHONY: proto-format -proto-check-breaking: - @$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml +proto-check-breaking: check-proto-deps + @echo "Checking for breaking changes in Protobuf files against local branch" + @echo "Note: This is only useful if your changes have not yet been committed." + @echo " Otherwise read up on buf's \"breaking\" command usage:" + @echo " https://docs.buf.build/breaking/usage" + @buf breaking --against ".git" .PHONY: proto-check-breaking -proto-check-breaking-ci: - @$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml -.PHONY: proto-check-breaking-ci +# TODO: Should be removed when work on ABCI++ is complete. +# For more information, see https://github.com/tendermint/tendermint/issues/8066 +abci-proto-gen: + ./scripts/abci-gen.sh +.PHONY: abci-proto-gen ############################################################################### ### Build ABCI ### @@ -222,9 +236,7 @@ build-docs: mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ - done < versions ; \ - mkdir -p ~/output/master ; \ - cp -r .vuepress/dist/* ~/output/master/ + done < versions ; .PHONY: build-docs ############################################################################### @@ -331,3 +343,4 @@ split-test-packages:$(BUILDDIR)/packages.txt split -d -n l/$(NUM_SPLIT) $< $<. test-group-%:split-test-packages cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out + diff --git a/README.md b/README.md index 9400e6b12..3e375791f 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap - [Terra](https://www.terra.money/) - [Celestia](https://celestia.org/) - [Anoma](https://anoma.network/) +- [Vocdoni](https://docs.vocdoni.io/) ### Research diff --git a/abci/client/creators.go b/abci/client/creators.go deleted file mode 100644 index 1eaa95d64..000000000 --- a/abci/client/creators.go +++ /dev/null @@ -1,33 +0,0 @@ -package abciclient - -import ( - "fmt" - - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" -) - -// Creator creates new ABCI clients. -type Creator func(log.Logger) (Client, error) - -// NewLocalCreator returns a Creator for the given app, -// which will be running locally. -func NewLocalCreator(app types.Application) Creator { - return func(logger log.Logger) (Client, error) { - return NewLocalClient(logger, app), nil - } -} - -// NewRemoteCreator returns a Creator for the given address (e.g. -// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you -// want the client to connect before reporting success. 
-func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator { - return func(log.Logger) (Client, error) { - remoteApp, err := NewClient(logger, addr, transport, mustConnect) - if err != nil { - return nil, fmt.Errorf("failed to connect to proxy: %w", err) - } - - return remoteApp, nil - } -} diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go deleted file mode 100644 index 9afcce739..000000000 --- a/abci/client/socket_client_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package abciclient_test - -import ( - "context" - "fmt" - "testing" - "time" - - "math/rand" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" -) - -func TestProperSyncCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - app := slowApp{} - logger := log.NewNopLogger() - - _, c := setupClientServer(ctx, t, logger, app) - - resp := make(chan error, 1) - go func() { - rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{}) - assert.NoError(t, err) - assert.NoError(t, c.Flush(ctx)) - assert.NotNil(t, rsp) - select { - case <-ctx.Done(): - case resp <- c.Error(): - } - }() - - select { - case <-time.After(time.Second): - require.Fail(t, "No response arrived") - case err, ok := <-resp: - require.True(t, ok, "Must not close channel") - assert.NoError(t, err, "This should return success") - } -} - -func setupClientServer( - ctx context.Context, - t *testing.T, - logger log.Logger, - app types.Application, -) (service.Service, abciclient.Client) { - t.Helper() - - // some port between 20k and 30k - port := 20000 + rand.Int31()%10000 - addr := fmt.Sprintf("localhost:%d", port) - - s, err := server.NewServer(logger, addr, "socket", app) - require.NoError(t, err) - require.NoError(t, s.Start(ctx)) - t.Cleanup(s.Wait) - - c := abciclient.NewSocketClient(logger, addr, true) - require.NoError(t, c.Start(ctx)) - t.Cleanup(c.Wait) - - require.True(t, s.IsRunning()) - require.True(t, c.IsRunning()) - - return s, c -} - -type slowApp struct { - types.BaseApplication -} - -func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock { - time.Sleep(200 * time.Millisecond) - return types.ResponseFinalizeBlock{} -} diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 5fea32b4e..7bfea4a4c 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) { cmd.AddCommand(consoleCmd) cmd.AddCommand(echoCmd) cmd.AddCommand(infoCmd) - cmd.AddCommand(deliverTxCmd) + cmd.AddCommand(finalizeBlockCmd) cmd.AddCommand(checkTxCmd) cmd.AddCommand(commitCmd) cmd.AddCommand(versionCmd) @@ -150,10 +150,9 @@ where example.file looks something like: check_tx 0x00 check_tx 0xff - deliver_tx 0x00 + finalize_block 0x00 check_tx 0x00 - deliver_tx 0x01 - deliver_tx 0x04 + finalize_block 0x01 0x04 0xff info `, Args: cobra.ExactArgs(0), @@ -169,7 +168,7 @@ This command opens an interactive console for running any of the other commands without opening a new connection each time `, Args: cobra.ExactArgs(0), - ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"}, + ValidArgs: []string{"echo", "info", 
"finalize_block", "check_tx", "commit", "query"}, RunE: cmdConsole, } @@ -188,11 +187,11 @@ var infoCmd = &cobra.Command{ RunE: cmdInfo, } -var deliverTxCmd = &cobra.Command{ - Use: "deliver_tx", - Short: "deliver a new transaction to the application", - Long: "deliver a new transaction to the application", - Args: cobra.ExactArgs(1), +var finalizeBlockCmd = &cobra.Command{ + Use: "finalize_block", + Short: "deliver a block of transactions to the application", + Long: "deliver a block of transactions to the application", + Args: cobra.MinimumNArgs(1), RunE: cmdFinalizeBlock, } @@ -426,7 +425,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { return cmdCheckTx(cmd, actualArgs) case "commit": return cmdCommit(cmd, actualArgs) - case "deliver_tx": + case "finalize_block": return cmdFinalizeBlock(cmd, actualArgs) case "echo": return cmdEcho(cmd, actualArgs) @@ -500,19 +499,23 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ Code: codeBad, - Log: "want the tx", + Log: "Must provide at least one transaction", }) return nil } - txBytes, err := stringOrHexToBytes(args[0]) - if err != nil { - return err + txs := make([][]byte, len(args)) + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txs[i] = txBytes } - res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) + res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs}) if err != nil { return err } - for _, tx := range res.Txs { + for _, tx := range res.TxResults { printResponse(cmd, args, response{ Code: tx.Code, Data: tx.Data, diff --git a/abci/example/example_test.go b/abci/example/example_test.go index bbe28d664..9d9d1548f 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap // Send bulk request res, err := client.FinalizeBlock(ctx, rfb) require.NoError(t, err) - require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match") - for _, tx := range res.Txs { + require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match") + for _, tx := range res.TxResults { require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed") } @@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type // Send request response, err := client.FinalizeBlock(ctx, &rfb) require.NoError(t, err, "Error in GRPC FinalizeBlock") - require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match") - for _, tx := range response.Txs { + require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match") + for _, tx := range response.TxResults { require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed") } } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index f295243bd..6b98d54bb 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -117,7 +117,7 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo { } // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes -func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx { +func (app *Application) handleTx(tx []byte) *types.ExecTxResult { // if it starts with "val:", update the validator set // format is "val:pubkey!power" if isValidatorTx(tx) { @@ 
-156,7 +156,7 @@ func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx { }, } - return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} + return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events} } func (app *Application) Close() error { @@ -190,12 +190,12 @@ func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.Resp } } - respTxs := make([]*types.ResponseDeliverTx, len(req.Txs)) + respTxs := make([]*types.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { respTxs[i] = app.handleTx(tx) } - return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates} + return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates} } func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { @@ -284,7 +284,7 @@ func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types. app.mu.Lock() defer app.mu.Unlock() - return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)} + return types.ResponsePrepareProposal{TxRecords: app.substPrepareTx(req.Txs)} } func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal { @@ -338,13 +338,13 @@ func isValidatorTx(tx []byte) bool { // format is "val:pubkey!power" // pubkey is a base64-encoded 32-byte ed25519 key -func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx { +func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult { tx = tx[len(ValidatorSetChangePrefix):] // get the pubkey and power pubKeyAndPower := strings.Split(string(tx), "!") if len(pubKeyAndPower) != 2 { - return &types.ResponseDeliverTx{ + return &types.ExecTxResult{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Expected 'pubkey!power'. 
Got %v", pubKeyAndPower)} } @@ -353,7 +353,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx { // decode the pubkey pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { - return &types.ResponseDeliverTx{ + return &types.ExecTxResult{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)} } @@ -361,7 +361,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx { // decode the power power, err := strconv.ParseInt(powerS, 10, 64) if err != nil { - return &types.ResponseDeliverTx{ + return &types.ExecTxResult{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Power (%s) is not an int", powerS)} } @@ -371,7 +371,7 @@ } // add, update, or remove a validator -func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx { +func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult { pubkey, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) @@ -386,7 +386,7 @@ } if !hasKey { pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes()) - return &types.ResponseDeliverTx{ + return &types.ExecTxResult{ Code: code.CodeTypeUnauthorized, Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)} } @@ -398,7 +398,7 @@ // add or update validator value := bytes.NewBuffer(make([]byte, 0)) if err := types.WriteMessage(&v, value); err != nil { - return &types.ResponseDeliverTx{ + return &types.ExecTxResult{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("error encoding validator: %v", err)} } @@ -411,7 +411,7 @@ // we only update the changes array if we successfully updated the tree app.ValUpdates = append(app.ValUpdates, v) - return &types.ResponseDeliverTx{Code: code.CodeTypeOK} + return &types.ExecTxResult{Code: code.CodeTypeOK} } // ----------------------------- @@ -420,26 +420,40 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response const PreparePrefix = "prepare" func isPrepareTx(tx []byte) bool { - return strings.HasPrefix(string(tx), PreparePrefix) + return bytes.HasPrefix(tx, []byte(PreparePrefix)) } // execPrepareTx is noop. tx data is considered as placeholder // and is substitute at the PrepareProposal. -func (app *Application) execPrepareTx(tx []byte) *types.ResponseDeliverTx { +func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult { // noop - return &types.ResponseDeliverTx{} + return &types.ExecTxResult{} } -// substPrepareTx subst all the preparetx in the blockdata -// to null string(could be any arbitrary string). -func (app *Application) substPrepareTx(blockData [][]byte) [][]byte { - // TODO: this mechanism will change with the current spec of PrepareProposal - // We now have a special type for marking a tx as changed +// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the +// proposal for transactions with the prefix stripped. +// It marks all of the original transactions as 'REMOVED' so that +// Tendermint will remove them from its mempool.
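+// For example (hypothetical input, an illustrative sketch of the behavior
+// implemented below): given a proposal containing ["prepareXYZ", "other"],
+// the returned records are, in order,
+//   {Tx: "XYZ", Action: TxRecord_ADDED},
+//   {Tx: "other", Action: TxRecord_UNMODIFIED},
+//   {Tx: "prepareXYZ", Action: TxRecord_REMOVED};
+// replacements keep their original positions, and the removed originals are
+// appended at the end of the record list.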
+func (app *Application) substPrepareTx(blockData [][]byte) []*types.TxRecord { + trs := make([]*types.TxRecord, len(blockData)) + var removed []*types.TxRecord for i, tx := range blockData { if isPrepareTx(tx) { - blockData[i] = make([]byte, len(tx)) + removed = append(removed, &types.TxRecord{ + Tx: tx, + Action: types.TxRecord_REMOVED, + }) + trs[i] = &types.TxRecord{ + Tx: bytes.TrimPrefix(tx, []byte(PreparePrefix)), + Action: types.TxRecord_ADDED, + } + continue + } + trs[i] = &types.TxRecord{ + Tx: tx, + Action: types.TxRecord_UNMODIFIED, } } - return blockData + return append(trs, removed...) } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 754027e05..002c1cb41 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -27,12 +27,12 @@ const ( func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestFinalizeBlock{Txs: [][]byte{tx}} ar := app.FinalizeBlock(req) - require.Equal(t, 1, len(ar.Txs)) - require.False(t, ar.Txs[0].IsErr()) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // repeating tx doesn't raise error ar = app.FinalizeBlock(req) - require.Equal(t, 1, len(ar.Txs)) - require.False(t, ar.Txs[0].IsErr()) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // commit app.Commit() @@ -107,7 +107,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { header := tmproto.Header{ Height: height, } - kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height}) + kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header}) kvstore.Commit() resInfo = kvstore.Info(types.RequestInfo{}) @@ -196,7 +196,6 @@ func makeApplyBlock( resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{ Hash: hash, Header: header, - Height: height, Txs: txs, }) @@ -326,13 +325,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.Equal(t, 1, len(ar.Txs)) - require.False(t, ar.Txs[0].IsErr()) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // repeating FinalizeBlock doesn't raise error ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.Equal(t, 1, len(ar.Txs)) - require.False(t, ar.Txs[0].IsErr()) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // commit _, err = app.Commit(ctx) require.NoError(t, err) diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 4bdaf5b0e..9273e8046 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error { res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes}) - for i, tx := range res.Txs { + for i, tx := range res.TxResults { code, data, log := tx.Code, tx.Data, tx.Log if code != codeExp[i] { fmt.Println("Failed test: FinalizeBlock") diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index e909266ec..09457189e 100644 --- 
a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,10 +1,10 @@ echo hello info commit -deliver_tx "abc" +finalize_block "abc" info commit query "abc" -deliver_tx "def=xyz" +finalize_block "def=xyz" "ghi=123" commit query "def" diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 9e702b5ce..c004ab059 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -12,7 +12,7 @@ -> code: OK -> data.hex: 0x0000000000000000 -> deliver_tx "abc" +> finalize_block "abc" -> code: OK > info @@ -33,12 +33,14 @@ -> value: abc -> value.hex: 616263 -> deliver_tx "def=xyz" +> finalize_block "def=xyz" "ghi=123" +-> code: OK +> finalize_block "def=xyz" "ghi=123" -> code: OK > commit -> code: OK --> data.hex: 0x0400000000000000 +-> data.hex: 0x0600000000000000 > query "def" -> code: OK diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci index 965ca842c..90e99c2f9 100644 --- a/abci/tests/test_cli/ex2.abci +++ b/abci/tests/test_cli/ex2.abci @@ -1,7 +1,7 @@ check_tx 0x00 check_tx 0xff -deliver_tx 0x00 +finalize_block 0x00 check_tx 0x00 -deliver_tx 0x01 -deliver_tx 0x04 +finalize_block 0x01 +finalize_block 0x04 info diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index 7ef8abbc4..aab0b1966 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -4,20 +4,20 @@ > check_tx 0xff -> code: OK -> deliver_tx 0x00 +> finalize_block 0x00 -> code: OK > check_tx 0x00 -> code: OK -> deliver_tx 0x01 +> finalize_block 0x01 -> code: OK -> deliver_tx 0x04 +> finalize_block 0x04 -> code: OK > info -> code: OK --> data: {"hashes":0,"txs":3} --> data.hex: 0x7B22686173686573223A302C22747873223A337D +-> data: {"size":3} +-> data.hex: 0x7B2273697A65223A337D diff --git a/abci/types/application.go b/abci/types/application.go index 389de354e..6961ea200 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -103,12 +103,12 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce } func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock { - txs := make([]*ResponseDeliverTx, len(req.Txs)) + txs := make([]*ExecTxResult, len(req.Txs)) for i := range req.Txs { - txs[i] = &ResponseDeliverTx{Code: CodeTypeOK} + txs[i] = &ExecTxResult{Code: CodeTypeOK} } return ResponseFinalizeBlock{ - Txs: txs, + TxResults: txs, } } diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index fb219fe07..4f17f9f83 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -13,7 +13,7 @@ import ( ) func TestMarshalJSON(t *testing.T) { - b, err := json.Marshal(&ResponseDeliverTx{}) + b, err := json.Marshal(&ExecTxResult{Code: 1}) assert.NoError(t, err) // include empty fields. assert.True(t, strings.Contains(string(b), "code")) diff --git a/abci/types/result.go b/abci/types/types.go similarity index 79% rename from abci/types/result.go rename to abci/types/types.go index d899b771a..4240301b5 100644 --- a/abci/types/result.go +++ b/abci/types/types.go @@ -33,6 +33,16 @@ func (r ResponseDeliverTx) IsErr() bool { return r.Code != CodeTypeOK } +// IsOK returns true if Code is OK. +func (r ExecTxResult) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ExecTxResult) IsErr() bool { + return r.Code != CodeTypeOK +} + // IsOK returns true if Code is OK. 
func (r ResponseQuery) IsOK() bool { return r.Code == CodeTypeOK @@ -157,3 +167,31 @@ Result: result, } } + +// deterministicExecTxResult constructs a copy of response that omits +// non-deterministic fields. The input response is not modified. +func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult { + return &ExecTxResult{ + Code: response.Code, + Data: response.Data, + GasWanted: response.GasWanted, + GasUsed: response.GasUsed, + } +} + +// MarshalTxResults encodes the TxResults as a list of byte +// slices. It strips off the non-deterministic pieces of the TxResults +// so that the resulting data can be used for hash comparisons and used +// in Merkle proofs. +func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) { + s := make([][]byte, len(r)) + for i, e := range r { + d := deterministicExecTxResult(e) + b, err := d.Marshal() + if err != nil { + return nil, err + } + s[i] = b + } + return s, nil +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 095f8c00d..b42c1e0bf 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -11,7 +11,7 @@ import ( _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" - types "github.com/tendermint/tendermint/proto/tendermint/types" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -188,7 +188,39 @@ func (x ResponseVerifyVoteExtension_Result) String() string { } func (ResponseVerifyVoteExtension_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38, 0} + return fileDescriptor_252557cfdd89a31a, []int{37, 0} +} + +// TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal +type TxRecord_TxAction int32 + +const ( + TxRecord_UNKNOWN TxRecord_TxAction = 0 + TxRecord_UNMODIFIED TxRecord_TxAction = 1 + TxRecord_ADDED TxRecord_TxAction = 2 + TxRecord_REMOVED TxRecord_TxAction = 3 +) + +var TxRecord_TxAction_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNMODIFIED", + 2: "ADDED", + 3: "REMOVED", +} + +var TxRecord_TxAction_value = map[string]int32{ + "UNKNOWN": 0, + "UNMODIFIED": 1, + "ADDED": 2, + "REMOVED": 3, +} + +func (x TxRecord_TxAction) String() string { + return proto.EnumName(TxRecord_TxAction_name, int32(x)) +} + +func (TxRecord_TxAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{47, 0} } type Request struct { @@ -374,6 +406,7 @@ func (m *Request) GetQuery() *RequestQuery { return nil } +// Deprecated: Do not use. func (m *Request) GetBeginBlock() *RequestBeginBlock { if x, ok := m.GetValue().(*Request_BeginBlock); ok { return x.BeginBlock @@ -388,6 +421,7 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } +// Deprecated: Do not use. func (m *Request) GetDeliverTx() *RequestDeliverTx { if x, ok := m.GetValue().(*Request_DeliverTx); ok { return x.DeliverTx @@ -395,6 +429,7 @@ func (m *Request) GetDeliverTx() *RequestDeliverTx { return nil } +// Deprecated: Do not use.
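+// Note: BeginBlock, DeliverTx, and EndBlock are subsumed by FinalizeBlock
+// in this revision of ABCI; these request variants are retained for
+// backward compatibility, hence the deprecation markers on their getters.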
func (m *Request) GetEndBlock() *RequestEndBlock { if x, ok := m.GetValue().(*Request_EndBlock); ok { return x.EndBlock @@ -541,74 +576,6 @@ func (m *RequestEcho) GetMessage() string { return "" } -type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` -} - -func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } -func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } -func (*RequestBeginBlock) ProtoMessage() {} -func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{2} -} -func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) -} -func (m *RequestBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo - -func (m *RequestBeginBlock) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *RequestBeginBlock) GetHeader() types.Header { - if m != nil { - return m.Header - } - return types.Header{} -} - -func (m *RequestBeginBlock) GetLastCommitInfo() LastCommitInfo { - if m != nil { - return m.LastCommitInfo - } - return LastCommitInfo{} -} - -func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { - if m != nil { - return m.ByzantineValidators - } - return nil -} - type RequestFlush struct { } @@ -616,7 +583,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{3} + return fileDescriptor_252557cfdd89a31a, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -656,7 +623,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{4} + return fileDescriptor_252557cfdd89a31a, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,19 +681,19 @@ func (m *RequestInfo) GetAbciVersion() string { } type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *types.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - 
Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` - AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` - InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *types1.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` } func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{5} + return fileDescriptor_252557cfdd89a31a, []int{4} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -769,7 +736,7 @@ func (m *RequestInitChain) GetChainId() string { return "" } -func (m *RequestInitChain) GetConsensusParams() *types.ConsensusParams { +func (m *RequestInitChain) GetConsensusParams() *types1.ConsensusParams { if m != nil { return m.ConsensusParams } @@ -808,7 +775,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{6} + return fileDescriptor_252557cfdd89a31a, []int{5} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -865,22 +832,25 @@ func (m *RequestQuery) GetProve() bool { return false } -type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +type RequestBeginBlock struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` } -func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } -func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } -func (*RequestDeliverTx) ProtoMessage() {} -func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} +func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } +func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } +func (*RequestBeginBlock) ProtoMessage() {} +func (*RequestBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{6} } -func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { +func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } 
-func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -890,67 +860,44 @@ func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) +func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBeginBlock.Merge(m, src) } -func (m *RequestDeliverTx) XXX_Size() int { +func (m *RequestBeginBlock) XXX_Size() int { return m.Size() } -func (m *RequestDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) +func (m *RequestBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) } -var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo +var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo -func (m *RequestDeliverTx) GetTx() []byte { +func (m *RequestBeginBlock) GetHash() []byte { if m != nil { - return m.Tx + return m.Hash } return nil } -type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +func (m *RequestBeginBlock) GetHeader() types1.Header { + if m != nil { + return m.Header + } + return types1.Header{} } -func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } -func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } -func (*RequestEndBlock) ProtoMessage() {} -func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} -} -func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { + if m != nil { + return m.LastCommitInfo } + return CommitInfo{} } -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) -} -func (m *RequestEndBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo -func (m *RequestEndBlock) GetHeight() int64 { +func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { if m != nil { - return m.Height + return m.ByzantineValidators } - return 0 + return nil } type RequestCheckTx struct { @@ -962,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1005,6 +952,94 @@ func (m *RequestCheckTx) GetType() CheckTxType { return CheckTxType_New } +type RequestDeliverTx struct { + Tx []byte 
`protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } +func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } +func (*RequestDeliverTx) ProtoMessage() {} +func (*RequestDeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{8} +} +func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestDeliverTx.Merge(m, src) +} +func (m *RequestDeliverTx) XXX_Size() int { + return m.Size() +} +func (m *RequestDeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo + +func (m *RequestDeliverTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +type RequestEndBlock struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } +func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } +func (*RequestEndBlock) ProtoMessage() {} +func (*RequestEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{9} +} +func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestEndBlock.Merge(m, src) +} +func (m *RequestEndBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo + +func (m *RequestEndBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + type RequestCommit struct { } @@ -1253,82 +1288,16 @@ func (m *RequestApplySnapshotChunk) GetSender() string { return "" } -type RequestPrepareProposal struct { - // block_data is an array of transactions that will be included in a block, - // sent to the app for possible modifications. - // applications can not exceed the size of the data passed to it. - BlockData [][]byte `protobuf:"bytes,1,rep,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` - // If an application decides to populate block_data with extra information, they can not exceed this value. - BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` - // votes includes all votes from the previous block. This contains vote extension data that can be used in proposal - // preparation. The votes here will then form the last commit that gets sent in the proposed block. 
- Votes []*types.Vote `protobuf:"bytes,3,rep,name=votes,proto3" json:"votes,omitempty"` -} - -func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } -func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } -func (*RequestPrepareProposal) ProtoMessage() {} -func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} -} -func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPrepareProposal.Merge(m, src) -} -func (m *RequestPrepareProposal) XXX_Size() int { - return m.Size() -} -func (m *RequestPrepareProposal) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo - -func (m *RequestPrepareProposal) GetBlockData() [][]byte { - if m != nil { - return m.BlockData - } - return nil -} - -func (m *RequestPrepareProposal) GetBlockDataSize() int64 { - if m != nil { - return m.BlockDataSize - } - return 0 -} - -func (m *RequestPrepareProposal) GetVotes() []*types.Vote { - if m != nil { - return m.Votes - } - return nil -} - -// Extends a vote with application-side injection -type RequestExtendVote struct { - Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` +// Extends a vote with application-side injection +type RequestExtendVote struct { + Vote *types1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` } func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } func (*RequestExtendVote) ProtoMessage() {} func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1357,7 +1326,7 @@ func (m *RequestExtendVote) XXX_DiscardUnknown() { var xxx_messageInfo_RequestExtendVote proto.InternalMessageInfo -func (m *RequestExtendVote) GetVote() *types.Vote { +func (m *RequestExtendVote) GetVote() *types1.Vote { if m != nil { return m.Vote } @@ -1366,14 +1335,14 @@ func (m *RequestExtendVote) GetVote() *types.Vote { // Verify the vote extension type RequestVerifyVoteExtension struct { - Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + Vote *types1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` } func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExtension{} } func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*RequestVerifyVoteExtension) ProtoMessage() {} func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{16} } func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1402,19 +1371,106 @@ func (m *RequestVerifyVoteExtension) XXX_DiscardUnknown() { var 
xxx_messageInfo_RequestVerifyVoteExtension proto.InternalMessageInfo -func (m *RequestVerifyVoteExtension) GetVote() *types.Vote { +func (m *RequestVerifyVoteExtension) GetVote() *types1.Vote { if m != nil { return m.Vote } return nil } +type RequestPrepareProposal struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + Txs [][]byte `protobuf:"bytes,3,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,4,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + ByzantineValidators []Evidence `protobuf:"bytes,5,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + // the modified transactions cannot exceed this size. + MaxTxBytes int64 `protobuf:"varint,6,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` +} + +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{17} +} +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +} +func (m *RequestPrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestPrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo + +func (m *RequestPrepareProposal) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestPrepareProposal) GetHeader() types1.Header { + if m != nil { + return m.Header + } + return types1.Header{} +} + +func (m *RequestPrepareProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { + if m != nil { + return m.LocalLastCommit + } + return ExtendedCommitInfo{} +} + +func (m *RequestPrepareProposal) GetByzantineValidators() []Evidence { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +func (m *RequestPrepareProposal) GetMaxTxBytes() int64 { + if m != nil { + return m.MaxTxBytes + } + return 0 +} + type RequestProcessProposal struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - Txs [][]byte `protobuf:"bytes,3,rep,name=txs,proto3" json:"txs,omitempty"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,4,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,5,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Hash []byte 
`protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + Txs [][]byte `protobuf:"bytes,3,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,4,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + ByzantineValidators []Evidence `protobuf:"bytes,5,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` } func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } @@ -1457,11 +1513,11 @@ func (m *RequestProcessProposal) GetHash() []byte { return nil } -func (m *RequestProcessProposal) GetHeader() types.Header { +func (m *RequestProcessProposal) GetHeader() types1.Header { if m != nil { return m.Header } - return types.Header{} + return types1.Header{} } func (m *RequestProcessProposal) GetTxs() [][]byte { @@ -1471,11 +1527,11 @@ func (m *RequestProcessProposal) GetTxs() [][]byte { return nil } -func (m *RequestProcessProposal) GetLastCommitInfo() LastCommitInfo { +func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { if m != nil { - return m.LastCommitInfo + return m.ProposedLastCommit } - return LastCommitInfo{} + return CommitInfo{} } func (m *RequestProcessProposal) GetByzantineValidators() []Evidence { @@ -1486,12 +1542,11 @@ func (m *RequestProcessProposal) GetByzantineValidators() []Evidence { } type RequestFinalizeBlock struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Header types.Header `protobuf:"bytes,4,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,5,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,6,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + Txs [][]byte `protobuf:"bytes,3,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,4,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + ByzantineValidators []Evidence `protobuf:"bytes,5,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` } func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } @@ -1527,13 +1582,6 @@ func (m *RequestFinalizeBlock) XXX_DiscardUnknown() { var xxx_messageInfo_RequestFinalizeBlock proto.InternalMessageInfo -func (m *RequestFinalizeBlock) GetTxs() [][]byte { - if m != nil { - return m.Txs - } - return nil -} - func (m *RequestFinalizeBlock) GetHash() []byte { if m != nil { return m.Hash @@ -1541,25 +1589,25 @@ func (m *RequestFinalizeBlock) GetHash() []byte { return nil } -func (m *RequestFinalizeBlock) GetHeight() int64 { +func (m *RequestFinalizeBlock) GetHeader() types1.Header { if m != nil { - return m.Height + return m.Header } - return 0 + return types1.Header{} } -func (m *RequestFinalizeBlock) GetHeader() types.Header { +func (m *RequestFinalizeBlock) GetTxs() [][]byte { if m != nil { - return m.Header + return m.Txs } - return types.Header{} + return nil } -func (m *RequestFinalizeBlock) GetLastCommitInfo() 
LastCommitInfo { +func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { if m != nil { - return m.LastCommitInfo + return m.DecidedLastCommit } - return LastCommitInfo{} + return CommitInfo{} } func (m *RequestFinalizeBlock) GetByzantineValidators() []Evidence { @@ -1764,6 +1812,7 @@ func (m *Response) GetQuery() *ResponseQuery { return nil } +// Deprecated: Do not use. func (m *Response) GetBeginBlock() *ResponseBeginBlock { if x, ok := m.GetValue().(*Response_BeginBlock); ok { return x.BeginBlock @@ -1778,6 +1827,7 @@ func (m *Response) GetCheckTx() *ResponseCheckTx { return nil } +// Deprecated: Do not use. func (m *Response) GetDeliverTx() *ResponseDeliverTx { if x, ok := m.GetValue().(*Response_DeliverTx); ok { return x.DeliverTx @@ -1785,6 +1835,7 @@ func (m *Response) GetDeliverTx() *ResponseDeliverTx { return nil } +// Deprecated: Do not use. func (m *Response) GetEndBlock() *ResponseEndBlock { if x, ok := m.GetValue().(*Response_EndBlock); ok { return x.EndBlock @@ -2091,9 +2142,9 @@ func (m *ResponseInfo) GetLastBlockAppHash() []byte { } type ResponseInitChain struct { - ConsensusParams *types.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` - AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + ConsensusParams *types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` } func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } @@ -2129,7 +2180,7 @@ func (m *ResponseInitChain) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo -func (m *ResponseInitChain) GetConsensusParams() *types.ConsensusParams { +func (m *ResponseInitChain) GetConsensusParams() *types1.ConsensusParams { if m != nil { return m.ConsensusParams } @@ -2308,13 +2359,12 @@ type ResponseCheckTx struct { Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // mempool_error is set by Tendermint. // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } @@ -2530,9 +2580,9 @@ func (m *ResponseDeliverTx) GetCodespace() string { } type ResponseEndBlock struct { - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *types.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` } func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } @@ -2575,7 +2625,7 @@ func (m *ResponseEndBlock) GetValidatorUpdates() []ValidatorUpdate { return nil } -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types.ConsensusParams { +func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } @@ -2834,59 +2884,15 @@ func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { return nil } -type ResponsePrepareProposal struct { - BlockData [][]byte `protobuf:"bytes,1,rep,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` -} - -func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } -func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } -func (*ResponsePrepareProposal) ProtoMessage() {} -func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} -} -func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) -} -func (m *ResponsePrepareProposal) XXX_Size() int { - return m.Size() -} -func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo - -func (m *ResponsePrepareProposal) GetBlockData() [][]byte { - if m != nil { - return m.BlockData - } - return nil -} - type ResponseExtendVote struct { - VoteExtension *types.VoteExtension `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` + VoteExtension *types1.VoteExtension `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } func (*ResponseExtendVote) ProtoMessage() {} func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, 
[]int{37} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2915,7 +2921,7 @@ func (m *ResponseExtendVote) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseExtendVote proto.InternalMessageInfo -func (m *ResponseExtendVote) GetVoteExtension() *types.VoteExtension { +func (m *ResponseExtendVote) GetVoteExtension() *types1.VoteExtension { if m != nil { return m.VoteExtension } @@ -2930,7 +2936,7 @@ func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteE func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*ResponseVerifyVoteExtension) ProtoMessage() {} func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2966,26 +2972,27 @@ func (m *ResponseVerifyVoteExtension) GetResult() ResponseVerifyVoteExtension_Re return ResponseVerifyVoteExtension_UNKNOWN } -type ResponseProcessProposal struct { - Accept bool `protobuf:"varint,1,opt,name=accept,proto3" json:"accept,omitempty"` - AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - TxResults []*ExecTxResult `protobuf:"bytes,3,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` - ValidatorUpdates []*ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` - ConsensusParamUpdates *types.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` +type ResponsePrepareProposal struct { + ModifiedTx bool `protobuf:"varint,1,opt,name=modified_tx,json=modifiedTx,proto3" json:"modified_tx,omitempty"` + TxRecords []*TxRecord `protobuf:"bytes,2,rep,name=tx_records,json=txRecords,proto3" json:"tx_records,omitempty"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,4,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ValidatorUpdates []*ValidatorUpdate `protobuf:"bytes,5,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,6,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` } -func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } -func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } -func (*ResponseProcessProposal) ProtoMessage() {} -func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} +func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } +func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } +func (*ResponsePrepareProposal) ProtoMessage() {} +func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{38} } -func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { +func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m 
*ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2995,72 +3002,80 @@ func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseProcessProposal.Merge(m, src) +func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) } -func (m *ResponseProcessProposal) XXX_Size() int { +func (m *ResponsePrepareProposal) XXX_Size() int { return m.Size() } -func (m *ResponseProcessProposal) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) +func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) } -var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo +var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo -func (m *ResponseProcessProposal) GetAccept() bool { +func (m *ResponsePrepareProposal) GetModifiedTx() bool { if m != nil { - return m.Accept + return m.ModifiedTx } return false } -func (m *ResponseProcessProposal) GetAppHash() []byte { +func (m *ResponsePrepareProposal) GetTxRecords() []*TxRecord { + if m != nil { + return m.TxRecords + } + return nil +} + +func (m *ResponsePrepareProposal) GetAppHash() []byte { if m != nil { return m.AppHash } return nil } -func (m *ResponseProcessProposal) GetTxResults() []*ExecTxResult { +func (m *ResponsePrepareProposal) GetTxResults() []*ExecTxResult { if m != nil { return m.TxResults } return nil } -func (m *ResponseProcessProposal) GetValidatorUpdates() []*ValidatorUpdate { +func (m *ResponsePrepareProposal) GetValidatorUpdates() []*ValidatorUpdate { if m != nil { return m.ValidatorUpdates } return nil } -func (m *ResponseProcessProposal) GetConsensusParamUpdates() *types.ConsensusParams { +func (m *ResponsePrepareProposal) GetConsensusParamUpdates() *types1.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } return nil } -type ResponseFinalizeBlock struct { - Txs []*ResponseDeliverTx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,2,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *types.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` +type ResponseProcessProposal struct { + Accept bool `protobuf:"varint,1,opt,name=accept,proto3" json:"accept,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,3,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ValidatorUpdates []*ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` } -func (m *ResponseFinalizeBlock) Reset() { *m = 
ResponseFinalizeBlock{} } -func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseFinalizeBlock) ProtoMessage() {} -func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} +func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } +func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } +func (*ResponseProcessProposal) ProtoMessage() {} +func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{39} } -func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { +func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -3070,39 +3085,95 @@ func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) +func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseProcessProposal.Merge(m, src) } -func (m *ResponseFinalizeBlock) XXX_Size() int { +func (m *ResponseProcessProposal) XXX_Size() int { return m.Size() } -func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) +func (m *ResponseProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) } -var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo +var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo -func (m *ResponseFinalizeBlock) GetTxs() []*ResponseDeliverTx { +func (m *ResponseProcessProposal) GetAccept() bool { if m != nil { - return m.Txs + return m.Accept + } + return false +} + +func (m *ResponseProcessProposal) GetAppHash() []byte { + if m != nil { + return m.AppHash } return nil } -func (m *ResponseFinalizeBlock) GetValidatorUpdates() []ValidatorUpdate { +func (m *ResponseProcessProposal) GetTxResults() []*ExecTxResult { + if m != nil { + return m.TxResults + } + return nil +} + +func (m *ResponseProcessProposal) GetValidatorUpdates() []*ValidatorUpdate { if m != nil { return m.ValidatorUpdates } return nil } -func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types.ConsensusParams { +func (m *ResponseProcessProposal) GetConsensusParamUpdates() *types1.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } return nil } +type ResponseFinalizeBlock struct { + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" 
json:"app_hash,omitempty"` + RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +} + +func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } +func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseFinalizeBlock) ProtoMessage() {} +func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{40} +} +func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) +} +func (m *ResponseFinalizeBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo + func (m *ResponseFinalizeBlock) GetEvents() []Event { if m != nil { return m.Events @@ -3110,23 +3181,110 @@ func (m *ResponseFinalizeBlock) GetEvents() []Event { return nil } -type LastCommitInfo struct { +func (m *ResponseFinalizeBlock) GetTxResults() []*ExecTxResult { + if m != nil { + return m.TxResults + } + return nil +} + +func (m *ResponseFinalizeBlock) GetValidatorUpdates() []ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + +type CommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` } -func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } -func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } -func (*LastCommitInfo) ProtoMessage() {} -func (*LastCommitInfo) Descriptor() ([]byte, []int) { +func (m *CommitInfo) Reset() { *m = CommitInfo{} } +func (m *CommitInfo) String() string { return proto.CompactTextString(m) } +func (*CommitInfo) ProtoMessage() {} +func (*CommitInfo) Descriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{41} } -func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { +func (m *CommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitInfo.Merge(m, src) +} +func (m *CommitInfo) XXX_Size() int { + return m.Size() +} +func (m *CommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitInfo 
proto.InternalMessageInfo + +func (m *CommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CommitInfo) GetVotes() []VoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +type ExtendedCommitInfo struct { + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []ExtendedVoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } +func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommitInfo) ProtoMessage() {} +func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{42} +} +func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ExtendedCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_ExtendedCommitInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -3136,26 +3294,26 @@ func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *LastCommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LastCommitInfo.Merge(m, src) +func (m *ExtendedCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommitInfo.Merge(m, src) } -func (m *LastCommitInfo) XXX_Size() int { +func (m *ExtendedCommitInfo) XXX_Size() int { return m.Size() } -func (m *LastCommitInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LastCommitInfo.DiscardUnknown(m) +func (m *ExtendedCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommitInfo.DiscardUnknown(m) } -var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo +var xxx_messageInfo_ExtendedCommitInfo proto.InternalMessageInfo -func (m *LastCommitInfo) GetRound() int32 { +func (m *ExtendedCommitInfo) GetRound() int32 { if m != nil { return m.Round } return 0 } -func (m *LastCommitInfo) GetVotes() []VoteInfo { +func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { if m != nil { return m.Votes } @@ -3174,7 +3332,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3228,7 +3386,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3288,7 +3446,7 @@ type ExecTxResult struct { Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - TxEvents []Event `protobuf:"bytes,7,rep,name=tx_events,json=txEvents,proto3" json:"events,omitempty"` 
+ Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } @@ -3296,7 +3454,7 @@ func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } func (*ExecTxResult) ProtoMessage() {} func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_252557cfdd89a31a, []int{45} } func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3367,9 +3525,9 @@ func (m *ExecTxResult) GetGasUsed() int64 { return 0 } -func (m *ExecTxResult) GetTxEvents() []Event { +func (m *ExecTxResult) GetEvents() []Event { if m != nil { - return m.TxEvents + return m.Events } return nil } @@ -3385,17 +3543,17 @@ func (m *ExecTxResult) GetCodespace() string { // // One usage is indexing transaction results. type TxResult struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` - Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ExecTxResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` } func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_252557cfdd89a31a, []int{46} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3445,11 +3603,63 @@ func (m *TxResult) GetTx() []byte { return nil } -func (m *TxResult) GetResult() ResponseDeliverTx { +func (m *TxResult) GetResult() ExecTxResult { if m != nil { return m.Result } - return ResponseDeliverTx{} + return ExecTxResult{} +} + +type TxRecord struct { + Action TxRecord_TxAction `protobuf:"varint,1,opt,name=action,proto3,enum=tendermint.abci.TxRecord_TxAction" json:"action,omitempty"` + Tx []byte `protobuf:"bytes,2,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *TxRecord) Reset() { *m = TxRecord{} } +func (m *TxRecord) String() string { return proto.CompactTextString(m) } +func (*TxRecord) ProtoMessage() {} +func (*TxRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{47} +} +func (m *TxRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxRecord.Merge(m, src) +} +func (m *TxRecord) XXX_Size() int { + return m.Size() +} +func (m *TxRecord) XXX_DiscardUnknown() { + xxx_messageInfo_TxRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_TxRecord proto.InternalMessageInfo + +func (m *TxRecord) GetAction() TxRecord_TxAction { + if m != nil { + return m.Action + } + return 
TxRecord_UNKNOWN +} + +func (m *TxRecord) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil } // Validator @@ -3463,7 +3673,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_252557cfdd89a31a, []int{48} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3516,7 +3726,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47} + return fileDescriptor_252557cfdd89a31a, []int{49} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3569,7 +3779,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{48} + return fileDescriptor_252557cfdd89a31a, []int{50} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3612,6 +3822,66 @@ func (m *VoteInfo) GetSignedLastBlock() bool { return false } +type ExtendedVoteInfo struct { + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` + VoteExtension []byte `protobuf:"bytes,3,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` +} + +func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } +func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedVoteInfo) ProtoMessage() {} +func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{51} +} +func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedVoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedVoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedVoteInfo.Merge(m, src) +} +func (m *ExtendedVoteInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedVoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedVoteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedVoteInfo proto.InternalMessageInfo + +func (m *ExtendedVoteInfo) GetValidator() Validator { + if m != nil { + return m.Validator + } + return Validator{} +} + +func (m *ExtendedVoteInfo) GetSignedLastBlock() bool { + if m != nil { + return m.SignedLastBlock + } + return false +} + +func (m *ExtendedVoteInfo) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension + } + return nil +} + type Evidence struct { Type EvidenceType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.EvidenceType" json:"type,omitempty"` // The offending validator @@ -3630,7 +3900,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return 
proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{49} + return fileDescriptor_252557cfdd89a31a, []int{52} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3706,7 +3976,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{50} + return fileDescriptor_252557cfdd89a31a, []int{53} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,24 +4046,25 @@ func init() { proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) proto.RegisterEnum("tendermint.abci.ResponseVerifyVoteExtension_Result", ResponseVerifyVoteExtension_Result_name, ResponseVerifyVoteExtension_Result_value) + proto.RegisterEnum("tendermint.abci.TxRecord_TxAction", TxRecord_TxAction_name, TxRecord_TxAction_value) proto.RegisterType((*Request)(nil), "tendermint.abci.Request") proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") + proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") + proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") - proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") - proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") proto.RegisterType((*RequestExtendVote)(nil), "tendermint.abci.RequestExtendVote") proto.RegisterType((*RequestVerifyVoteExtension)(nil), "tendermint.abci.RequestVerifyVoteExtension") + proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") proto.RegisterType((*RequestFinalizeBlock)(nil), "tendermint.abci.RequestFinalizeBlock") proto.RegisterType((*Response)(nil), "tendermint.abci.Response") @@ -3812,19 +4083,22 @@ func init() { proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") 
proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") - proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") proto.RegisterType((*ResponseExtendVote)(nil), "tendermint.abci.ResponseExtendVote") proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "tendermint.abci.ResponseVerifyVoteExtension") + proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") proto.RegisterType((*ResponseFinalizeBlock)(nil), "tendermint.abci.ResponseFinalizeBlock") - proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") + proto.RegisterType((*CommitInfo)(nil), "tendermint.abci.CommitInfo") + proto.RegisterType((*ExtendedCommitInfo)(nil), "tendermint.abci.ExtendedCommitInfo") proto.RegisterType((*Event)(nil), "tendermint.abci.Event") proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") proto.RegisterType((*ExecTxResult)(nil), "tendermint.abci.ExecTxResult") proto.RegisterType((*TxResult)(nil), "tendermint.abci.TxResult") + proto.RegisterType((*TxRecord)(nil), "tendermint.abci.TxRecord") proto.RegisterType((*Validator)(nil), "tendermint.abci.Validator") proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.ValidatorUpdate") proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.VoteInfo") + proto.RegisterType((*ExtendedVoteInfo)(nil), "tendermint.abci.ExtendedVoteInfo") proto.RegisterType((*Evidence)(nil), "tendermint.abci.Evidence") proto.RegisterType((*Snapshot)(nil), "tendermint.abci.Snapshot") } @@ -3832,208 +4106,220 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, - 0x15, 0xe6, 0xff, 0xcf, 0xe3, 0xaf, 0x46, 0x8e, 0x43, 0x33, 0xb6, 0xe4, 0xac, 0x91, 0xc4, 0x71, - 0x1c, 0xb9, 0x91, 0x9b, 0xd4, 0x41, 0xd2, 0x26, 0x12, 0x4d, 0x95, 0x8a, 0x15, 0x49, 0x1d, 0x51, - 0x0e, 0xd2, 0x26, 0xde, 0x2c, 0xc9, 0x11, 0xb9, 0x31, 0xb9, 0xbb, 0xd9, 0x5d, 0x32, 0x92, 0x8f, - 0xfd, 0xb9, 0x04, 0x2d, 0x90, 0x63, 0x81, 0x22, 0xb7, 0x1e, 0x7a, 0x2d, 0xd0, 0x43, 0x4f, 0x3d, - 0x15, 0x68, 0x80, 0xf6, 0x90, 0x63, 0x0f, 0x45, 0x5a, 0x24, 0xb7, 0x5e, 0x7b, 0xe8, 0xa9, 0x40, - 0x31, 0x3f, 0xfb, 0x47, 0x72, 0xf9, 0x53, 0x39, 0xa7, 0xde, 0x66, 0xde, 0xbe, 0xf7, 0x76, 0xe6, - 0xcd, 0xcf, 0x7b, 0xdf, 0x7b, 0x03, 0x4f, 0xd9, 0x44, 0xeb, 0x10, 0x73, 0xa0, 0x6a, 0xf6, 0x2d, - 0xa5, 0xd5, 0x56, 0x6f, 0xd9, 0x67, 0x06, 0xb1, 0x36, 0x0c, 0x53, 0xb7, 0x75, 0x54, 0xf2, 0x3e, - 0x6e, 0xd0, 0x8f, 0xd5, 0x2b, 0x3e, 0xee, 0xb6, 0x79, 0x66, 0xd8, 0xfa, 0x2d, 0xc3, 0xd4, 0xf5, - 0x13, 0xce, 0x5f, 0xbd, 0xec, 0xfb, 0xcc, 0xf4, 0xf8, 0xb5, 0x05, 0xbe, 0x0a, 0xe1, 0x87, 0xe4, - 0xcc, 0xf9, 0x7a, 0x65, 0x42, 0xd6, 0x50, 0x4c, 0x65, 0xe0, 0x7c, 0x5e, 0xef, 0xea, 0x7a, 0xb7, - 0x4f, 0x6e, 0xb1, 0x5e, 0x6b, 0x78, 0x72, 0xcb, 0x56, 0x07, 0xc4, 0xb2, 0x95, 0x81, 0x21, 0x18, - 0x2e, 0x74, 0xf5, 0xae, 0xce, 0x9a, 0xb7, 0x68, 0x8b, 0x53, 0xa5, 0x7f, 0x01, 0xa4, 0x31, 0xf9, - 0x68, 0x48, 0x2c, 0x1b, 0x6d, 0x42, 0x82, 0xb4, 0x7b, 0x7a, 0x25, 0x7a, 0x35, 0x7a, 0x3d, 0xb7, - 0x79, 0x79, 0x63, 0x6c, 0x72, 0x1b, 0x82, 0xaf, 0xde, 0xee, 0xe9, 0x8d, 0x08, 0x66, 0xbc, 0xe8, - 0x65, 0x48, 0x9e, 0xf4, 0x87, 0x56, 0xaf, 0x12, 0x63, 0x42, 0x57, 0xc2, 0x84, 
- ... (remaining bytes of the gzipped FileDescriptorProto elided: this blob is regenerated wholesale by the protobuf toolchain whenever tendermint/abci/types.proto changes, and the byte literal itself carries no reviewable content)
0x8b, 0xfc, 0xf0, 0x85, 0xae, 0x6a, 0xf7, 0x86, 0xad, 0x8d, 0xb6, 0x3e, 0xb8, 0xe5, 0x7f, - 0x5e, 0x3d, 0xed, 0xc9, 0x77, 0x2b, 0xc5, 0xbc, 0xe9, 0xed, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x5c, 0xd7, 0x22, 0x06, 0x12, 0x2e, 0x00, 0x00, + // 3395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0x4b, 0x73, 0xdb, 0xd6, + 0xf5, 0xe7, 0xfb, 0x71, 0x28, 0x3e, 0x74, 0xa5, 0x38, 0x34, 0x63, 0x4b, 0x0e, 0x3c, 0x49, 0x1c, + 0x27, 0x91, 0xff, 0xb1, 0x27, 0xf9, 0x3b, 0x4d, 0xd2, 0x8c, 0x44, 0x51, 0xa1, 0x6c, 0x59, 0x52, + 0x20, 0xca, 0x99, 0xb4, 0xa9, 0x11, 0x10, 0xb8, 0x22, 0x11, 0x93, 0x00, 0x02, 0x80, 0x0a, 0x95, + 0x55, 0xa7, 0x33, 0xd9, 0x64, 0x3a, 0xd3, 0xec, 0xda, 0x99, 0x4e, 0xa6, 0x9b, 0x76, 0xa6, 0x1f, + 0xa0, 0x8b, 0xae, 0xba, 0x69, 0x17, 0x59, 0x74, 0x91, 0x5d, 0x3b, 0x5d, 0xa4, 0x9d, 0x64, 0xd7, + 0x2f, 0x90, 0x55, 0x1f, 0x73, 0x1f, 0x00, 0x01, 0x90, 0xe0, 0x23, 0xb6, 0xb3, 0xe9, 0x0e, 0xf7, + 0xf0, 0x9c, 0x83, 0x7b, 0x0f, 0xee, 0x3d, 0xe7, 0xfc, 0xce, 0xb9, 0x84, 0x27, 0x1c, 0xac, 0xab, + 0xd8, 0xea, 0x6b, 0xba, 0x73, 0x4d, 0x6e, 0x2b, 0xda, 0x35, 0xe7, 0xcc, 0xc4, 0xf6, 0x86, 0x69, + 0x19, 0x8e, 0x81, 0xca, 0xa3, 0x1f, 0x37, 0xc8, 0x8f, 0xb5, 0x8b, 0x3e, 0x6e, 0xc5, 0x3a, 0x33, + 0x1d, 0xe3, 0x9a, 0x69, 0x19, 0xc6, 0x09, 0xe3, 0xaf, 0x5d, 0xf0, 0xfd, 0x4c, 0xf5, 0xf8, 0xb5, + 0x05, 0x7e, 0xe5, 0xc2, 0xf7, 0xf1, 0x99, 0xfb, 0xeb, 0xc5, 0x31, 0x59, 0x53, 0xb6, 0xe4, 0xbe, + 0xfb, 0xf3, 0x7a, 0xc7, 0x30, 0x3a, 0x3d, 0x7c, 0x8d, 0x8e, 0xda, 0x83, 0x93, 0x6b, 0x8e, 0xd6, + 0xc7, 0xb6, 0x23, 0xf7, 0x4d, 0xce, 0xb0, 0xda, 0x31, 0x3a, 0x06, 0x7d, 0xbc, 0x46, 0x9e, 0x18, + 0x55, 0xf8, 0x0f, 0x40, 0x56, 0xc4, 0x1f, 0x0c, 0xb0, 0xed, 0xa0, 0xeb, 0x90, 0xc2, 0x4a, 0xd7, + 0xa8, 0xc6, 0x2f, 0xc5, 0xaf, 0x14, 0xae, 0x5f, 0xd8, 0x08, 0x2d, 0x6e, 0x83, 0xf3, 0x35, 0x94, + 0xae, 0xd1, 0x8c, 0x89, 0x94, 0x17, 0xbd, 0x04, 0xe9, 0x93, 0xde, 0xc0, 0xee, 0x56, 0x13, 0x54, + 0xe8, 0x62, 0x94, 0xd0, 0x0e, 0x61, 0x6a, 0xc6, 0x44, 0xc6, 0x4d, 0x5e, 0xa5, 0xe9, 0x27, 0x46, + 0x35, 0x39, 0xfd, 0x55, 0xbb, 0xfa, 0x09, 0x7d, 0x15, 0xe1, 0x45, 0x5b, 0x00, 0x9a, 0xae, 0x39, + 0x92, 0xd2, 0x95, 0x35, 0xbd, 0x9a, 0xa2, 0x92, 0x4f, 0x46, 0x4b, 0x6a, 0x4e, 0x9d, 0x30, 0x36, + 0x63, 0x62, 0x5e, 0x73, 0x07, 0x64, 0xba, 0x1f, 0x0c, 0xb0, 0x75, 0x56, 0x4d, 0x4f, 0x9f, 0xee, + 0x5b, 0x84, 0x89, 0x4c, 0x97, 0x72, 0xa3, 0x5d, 0x28, 0xb4, 0x71, 0x47, 0xd3, 0xa5, 0x76, 0xcf, + 0x50, 0xee, 0x57, 0x33, 0x54, 0x58, 0x88, 0x12, 0xde, 0x22, 0xac, 0x5b, 0x84, 0x73, 0x2b, 0x51, + 0x8d, 0x37, 0x63, 0x22, 0xb4, 0x3d, 0x0a, 0x7a, 0x0d, 0x72, 0x4a, 0x17, 0x2b, 0xf7, 0x25, 0x67, + 0x58, 0xcd, 0x52, 0x3d, 0xeb, 0x51, 0x7a, 0xea, 0x84, 0xaf, 0x35, 0x6c, 0xc6, 0xc4, 0xac, 0xc2, + 0x1e, 0xd1, 0x0e, 0x80, 0x8a, 0x7b, 0xda, 0x29, 0xb6, 0x88, 0x7c, 0x6e, 0xba, 0x0d, 0xb6, 0x19, + 0x67, 0x6b, 0xc8, 0xa7, 0x91, 0x57, 0x5d, 0x02, 0xaa, 0x43, 0x1e, 0xeb, 0x2a, 0x5f, 0x4e, 0x9e, + 0xaa, 0xb9, 0x14, 0xf9, 0xbd, 0x75, 0xd5, 0xbf, 0x98, 0x1c, 0xe6, 0x63, 0x74, 0x13, 0x32, 0x8a, + 0xd1, 0xef, 0x6b, 0x4e, 0x15, 0xa8, 0x86, 0xb5, 0xc8, 0x85, 0x50, 0xae, 0x66, 0x4c, 0xe4, 0xfc, + 0x68, 0x1f, 0x4a, 0x3d, 0xcd, 0x76, 0x24, 0x5b, 0x97, 0x4d, 0xbb, 0x6b, 0x38, 0x76, 0xb5, 0x40, + 0x35, 0x3c, 0x15, 0xa5, 0x61, 0x4f, 0xb3, 0x9d, 0x23, 0x97, 0xb9, 0x19, 0x13, 0x8b, 0x3d, 0x3f, + 0x81, 0xe8, 0x33, 0x4e, 0x4e, 0xb0, 0xe5, 0x29, 0xac, 0x2e, 0x4d, 0xd7, 0x77, 0x40, 0xb8, 0x5d, + 0x79, 0xa2, 0xcf, 0xf0, 0x13, 0xd0, 0x0f, 0x61, 0xa5, 0x67, 0xc8, 0xaa, 0xa7, 0x4e, 0x52, 0xba, + 0x03, 0xfd, 0x7e, 0xb5, 0x48, 
0x95, 0x3e, 0x1b, 0x39, 0x49, 0x43, 0x56, 0x5d, 0x15, 0x75, 0x22, + 0xd0, 0x8c, 0x89, 0xcb, 0xbd, 0x30, 0x11, 0xdd, 0x83, 0x55, 0xd9, 0x34, 0x7b, 0x67, 0x61, 0xed, + 0x25, 0xaa, 0xfd, 0x6a, 0x94, 0xf6, 0x4d, 0x22, 0x13, 0x56, 0x8f, 0xe4, 0x31, 0x2a, 0x6a, 0x41, + 0xc5, 0xb4, 0xb0, 0x29, 0x5b, 0x58, 0x32, 0x2d, 0xc3, 0x34, 0x6c, 0xb9, 0x57, 0x2d, 0x53, 0xdd, + 0xcf, 0x44, 0xe9, 0x3e, 0x64, 0xfc, 0x87, 0x9c, 0xbd, 0x19, 0x13, 0xcb, 0x66, 0x90, 0xc4, 0xb4, + 0x1a, 0x0a, 0xb6, 0xed, 0x91, 0xd6, 0xca, 0x2c, 0xad, 0x94, 0x3f, 0xa8, 0x35, 0x40, 0x42, 0x0d, + 0x28, 0xe0, 0x21, 0x11, 0x97, 0x4e, 0x0d, 0x07, 0x57, 0x97, 0xa7, 0x1f, 0xac, 0x06, 0x65, 0xbd, + 0x6b, 0x38, 0x98, 0x1c, 0x2a, 0xec, 0x8d, 0x90, 0x0c, 0x8f, 0x9d, 0x62, 0x4b, 0x3b, 0x39, 0xa3, + 0x6a, 0x24, 0xfa, 0x8b, 0xad, 0x19, 0x7a, 0x15, 0x51, 0x85, 0xcf, 0x45, 0x29, 0xbc, 0x4b, 0x85, + 0x88, 0x8a, 0x86, 0x2b, 0xd2, 0x8c, 0x89, 0x2b, 0xa7, 0xe3, 0x64, 0xb2, 0xc5, 0x4e, 0x34, 0x5d, + 0xee, 0x69, 0x1f, 0x61, 0x7e, 0x6c, 0x56, 0xa6, 0x6f, 0xb1, 0x1d, 0xce, 0x4d, 0xcf, 0x0a, 0xd9, + 0x62, 0x27, 0x7e, 0xc2, 0x56, 0x16, 0xd2, 0xa7, 0x72, 0x6f, 0x80, 0x85, 0x67, 0xa0, 0xe0, 0x73, + 0xac, 0xa8, 0x0a, 0xd9, 0x3e, 0xb6, 0x6d, 0xb9, 0x83, 0xa9, 0x1f, 0xce, 0x8b, 0xee, 0x50, 0x28, + 0xc1, 0x92, 0xdf, 0x99, 0x0a, 0x9f, 0xc6, 0x3d, 0x49, 0xe2, 0x27, 0x89, 0xe4, 0x29, 0xb6, 0xe8, + 0xb2, 0xb9, 0x24, 0x1f, 0xa2, 0xcb, 0x50, 0xa4, 0x53, 0x96, 0xdc, 0xdf, 0x89, 0xb3, 0x4e, 0x89, + 0x4b, 0x94, 0x78, 0x97, 0x33, 0xad, 0x43, 0xc1, 0xbc, 0x6e, 0x7a, 0x2c, 0x49, 0xca, 0x02, 0xe6, + 0x75, 0xd3, 0x65, 0x78, 0x12, 0x96, 0xc8, 0xfa, 0x3c, 0x8e, 0x14, 0x7d, 0x49, 0x81, 0xd0, 0x38, + 0x8b, 0xf0, 0xe7, 0x04, 0x54, 0xc2, 0x0e, 0x18, 0xdd, 0x84, 0x14, 0x89, 0x45, 0x3c, 0xac, 0xd4, + 0x36, 0x58, 0xa0, 0xda, 0x70, 0x03, 0xd5, 0x46, 0xcb, 0x0d, 0x54, 0x5b, 0xb9, 0xcf, 0xbf, 0x5c, + 0x8f, 0x7d, 0xfa, 0xf7, 0xf5, 0xb8, 0x48, 0x25, 0xd0, 0x79, 0xe2, 0x2b, 0x65, 0x4d, 0x97, 0x34, + 0x95, 0x4e, 0x39, 0x4f, 0x1c, 0xa1, 0xac, 0xe9, 0xbb, 0x2a, 0xda, 0x83, 0x8a, 0x62, 0xe8, 0x36, + 0xd6, 0xed, 0x81, 0x2d, 0xb1, 0x40, 0xc8, 0x83, 0x49, 0xc0, 0x1d, 0xb2, 0xf0, 0x5a, 0x77, 0x39, + 0x0f, 0x29, 0xa3, 0x58, 0x56, 0x82, 0x04, 0xe2, 0x56, 0x4f, 0xe5, 0x9e, 0xa6, 0xca, 0x8e, 0x61, + 0xd9, 0xd5, 0xd4, 0xa5, 0xe4, 0x44, 0x7f, 0x78, 0xd7, 0x65, 0x39, 0x36, 0x55, 0xd9, 0xc1, 0x5b, + 0x29, 0x32, 0x5d, 0xd1, 0x27, 0x89, 0x9e, 0x86, 0xb2, 0x6c, 0x9a, 0x92, 0xed, 0xc8, 0x0e, 0x96, + 0xda, 0x67, 0x0e, 0xb6, 0x69, 0xa0, 0x59, 0x12, 0x8b, 0xb2, 0x69, 0x1e, 0x11, 0xea, 0x16, 0x21, + 0xa2, 0xa7, 0xa0, 0x44, 0x62, 0x92, 0x26, 0xf7, 0xa4, 0x2e, 0xd6, 0x3a, 0x5d, 0x87, 0x86, 0x94, + 0xa4, 0x58, 0xe4, 0xd4, 0x26, 0x25, 0x0a, 0xaa, 0xf7, 0xc5, 0x69, 0x3c, 0x42, 0x08, 0x52, 0xaa, + 0xec, 0xc8, 0xd4, 0x92, 0x4b, 0x22, 0x7d, 0x26, 0x34, 0x53, 0x76, 0xba, 0xdc, 0x3e, 0xf4, 0x19, + 0x9d, 0x83, 0x0c, 0x57, 0x9b, 0xa4, 0x6a, 0xf9, 0x08, 0xad, 0x42, 0xda, 0xb4, 0x8c, 0x53, 0x4c, + 0x3f, 0x5d, 0x4e, 0x64, 0x03, 0xe1, 0xc7, 0x09, 0x58, 0x1e, 0x8b, 0x5c, 0x44, 0x6f, 0x57, 0xb6, + 0xbb, 0xee, 0xbb, 0xc8, 0x33, 0x7a, 0x99, 0xe8, 0x95, 0x55, 0x6c, 0xf1, 0x68, 0x5f, 0x1d, 0x37, + 0x75, 0x93, 0xfe, 0xce, 0x4d, 0xc3, 0xb9, 0xd1, 0x6d, 0xa8, 0xf4, 0x64, 0xdb, 0x91, 0x98, 0xf7, + 0x97, 0x7c, 0x91, 0xff, 0x89, 0x31, 0x23, 0xb3, 0x58, 0x41, 0x36, 0x34, 0x57, 0x52, 0x22, 0xa2, + 0x23, 0x2a, 0x12, 0x61, 0xb5, 0x7d, 0xf6, 0x91, 0xac, 0x3b, 0x9a, 0x8e, 0xa5, 0xb1, 0xaf, 0x76, + 0x7e, 0x4c, 0x61, 0xe3, 0x54, 0x53, 0xb1, 0xae, 0xb8, 0x9f, 0x6b, 0xc5, 0x13, 0xf6, 0x3e, 0xa7, + 0x2d, 0x88, 0x50, 0x0a, 0xc6, 0x5c, 0x54, 0x82, 0x84, 
0x33, 0xe4, 0x8b, 0x4f, 0x38, 0x43, 0xf4, + 0x7f, 0x90, 0x22, 0x0b, 0xa4, 0x0b, 0x2f, 0x4d, 0x48, 0x58, 0xb8, 0x5c, 0xeb, 0xcc, 0xc4, 0x22, + 0xe5, 0x14, 0x04, 0xef, 0x28, 0x78, 0x71, 0x38, 0xac, 0x55, 0x78, 0x16, 0xca, 0xa1, 0x20, 0xeb, + 0xfb, 0x76, 0x71, 0xff, 0xb7, 0x13, 0xca, 0x50, 0x0c, 0x44, 0x53, 0xe1, 0x1c, 0xac, 0x4e, 0x0a, + 0x8e, 0x42, 0xd7, 0xa3, 0x07, 0x82, 0x1c, 0x7a, 0x09, 0x72, 0x5e, 0x74, 0x64, 0x47, 0x71, 0xdc, + 0x56, 0x2e, 0xb3, 0xe8, 0xb1, 0x92, 0x33, 0x48, 0xb6, 0x34, 0xdd, 0x0b, 0x09, 0x3a, 0xf1, 0xac, + 0x6c, 0x9a, 0x4d, 0xd9, 0xee, 0x0a, 0xef, 0x41, 0x35, 0x2a, 0xf2, 0x85, 0x96, 0x91, 0xf2, 0xb6, + 0xe0, 0x39, 0xc8, 0x9c, 0x18, 0x56, 0x5f, 0x76, 0xa8, 0xb2, 0xa2, 0xc8, 0x47, 0x64, 0x6b, 0xb2, + 0x28, 0x98, 0xa4, 0x64, 0x36, 0x10, 0x24, 0x38, 0x1f, 0x19, 0xfd, 0x88, 0x88, 0xa6, 0xab, 0x98, + 0xd9, 0xb3, 0x28, 0xb2, 0xc1, 0x48, 0x11, 0x9b, 0x2c, 0x1b, 0x90, 0xd7, 0xda, 0x74, 0xad, 0x54, + 0x7f, 0x5e, 0xe4, 0x23, 0xe1, 0x0d, 0x6f, 0xeb, 0x8f, 0x62, 0x0b, 0xba, 0x0a, 0x29, 0x1a, 0x8d, + 0x98, 0x95, 0xce, 0x8d, 0x6f, 0x72, 0xc2, 0x25, 0x52, 0x1e, 0xa1, 0x09, 0xb5, 0xe8, 0x58, 0xb2, + 0x90, 0xa6, 0x3f, 0x26, 0xe0, 0xdc, 0xe4, 0x70, 0xfc, 0x50, 0xcf, 0x62, 0x05, 0x92, 0xce, 0x90, + 0xf8, 0xca, 0xe4, 0x95, 0x25, 0x91, 0x3c, 0xa2, 0x63, 0x58, 0xee, 0x19, 0x8a, 0xdc, 0x93, 0x7c, + 0x67, 0x94, 0xa7, 0xd7, 0x97, 0xc7, 0x4f, 0x13, 0x35, 0x13, 0x56, 0xc7, 0x8e, 0x69, 0x99, 0xea, + 0xd8, 0xf3, 0xce, 0x6a, 0xe4, 0x39, 0x4d, 0x7f, 0xfb, 0x73, 0x8a, 0x2e, 0xc1, 0x52, 0x5f, 0x1e, + 0x4a, 0xce, 0x90, 0x3b, 0x57, 0xe6, 0x35, 0xa1, 0x2f, 0x0f, 0x5b, 0x43, 0xea, 0x59, 0x85, 0x5f, + 0xf9, 0xad, 0x18, 0xcc, 0x35, 0x1e, 0xad, 0x15, 0x8f, 0x60, 0x95, 0xe5, 0x45, 0x58, 0x9d, 0x60, + 0xc8, 0x39, 0xfc, 0x1c, 0x72, 0xc5, 0x1f, 0xad, 0x0d, 0x85, 0x5f, 0x26, 0x3c, 0x07, 0x11, 0x48, + 0x51, 0x1e, 0xb1, 0x7d, 0xde, 0x82, 0x15, 0x15, 0x2b, 0x9a, 0xfa, 0x6d, 0xcd, 0xb3, 0xcc, 0xa5, + 0x1f, 0xb1, 0x75, 0xfe, 0x52, 0x80, 0x9c, 0x88, 0x6d, 0x93, 0x24, 0x08, 0x68, 0x0b, 0xf2, 0x78, + 0xa8, 0x60, 0xd3, 0x71, 0x73, 0xaa, 0xc9, 0xb9, 0x29, 0xe3, 0x6e, 0xb8, 0x9c, 0x04, 0x69, 0x79, + 0x62, 0xe8, 0x06, 0x07, 0xd5, 0xd1, 0xf8, 0x98, 0x8b, 0xfb, 0x51, 0xf5, 0xcb, 0x2e, 0xaa, 0x4e, + 0x46, 0x02, 0x2b, 0x26, 0x15, 0x82, 0xd5, 0x37, 0x38, 0xac, 0x4e, 0xcd, 0x78, 0x59, 0x00, 0x57, + 0xd7, 0x03, 0xb8, 0x3a, 0x3d, 0x63, 0x99, 0x11, 0xc0, 0xfa, 0x65, 0x17, 0x58, 0x67, 0x66, 0xcc, + 0x38, 0x84, 0xac, 0x6f, 0x05, 0x91, 0x75, 0x36, 0xc2, 0xed, 0xb8, 0xd2, 0x53, 0xa1, 0xf5, 0xeb, + 0x3e, 0x68, 0x9d, 0x8b, 0xc4, 0xb4, 0x4c, 0xd1, 0x04, 0x6c, 0xfd, 0x66, 0x00, 0x5b, 0xe7, 0x67, + 0xd8, 0x61, 0x0a, 0xb8, 0xde, 0xf6, 0x83, 0x6b, 0x88, 0xc4, 0xe8, 0xfc, 0xbb, 0x47, 0xa1, 0xeb, + 0x57, 0x3c, 0x74, 0x5d, 0x88, 0x2c, 0x13, 0xf0, 0xb5, 0x84, 0xe1, 0xf5, 0xc1, 0x18, 0xbc, 0x66, + 0x70, 0xf8, 0xe9, 0x48, 0x15, 0x33, 0xf0, 0xf5, 0xc1, 0x18, 0xbe, 0x2e, 0xce, 0x50, 0x38, 0x03, + 0x60, 0xbf, 0x3b, 0x19, 0x60, 0x47, 0x43, 0x60, 0x3e, 0xcd, 0xf9, 0x10, 0xb6, 0x14, 0x81, 0xb0, + 0xcb, 0x91, 0x68, 0x90, 0xa9, 0x9f, 0x1b, 0x62, 0x1f, 0x4f, 0x80, 0xd8, 0x0c, 0x0c, 0x5f, 0x89, + 0x54, 0x3e, 0x07, 0xc6, 0x3e, 0x9e, 0x80, 0xb1, 0x97, 0x67, 0xaa, 0x9d, 0x09, 0xb2, 0x77, 0x82, + 0x20, 0x1b, 0xcd, 0x38, 0x63, 0x91, 0x28, 0xbb, 0x1d, 0x85, 0xb2, 0x19, 0x12, 0x7e, 0x3e, 0x52, + 0xe3, 0x02, 0x30, 0xfb, 0x60, 0x0c, 0x66, 0xaf, 0xce, 0xd8, 0x69, 0xf3, 0xe2, 0xec, 0x67, 0x49, + 0xaa, 0x17, 0x72, 0xd5, 0x24, 0x5b, 0xc4, 0x96, 0x65, 0x58, 0x1c, 0x31, 0xb3, 0x81, 0x70, 0x85, + 0xe0, 0xae, 0x91, 0x5b, 0x9e, 0x82, 0xc9, 0x69, 0x56, 0xee, 0x73, 0xc5, 0xc2, 
0xef, 0xe3, 0x23, + 0x59, 0x0a, 0x57, 0xfc, 0x98, 0x2d, 0xcf, 0x31, 0x9b, 0x0f, 0xa9, 0x27, 0x82, 0x48, 0x7d, 0x1d, + 0x0a, 0x24, 0xdb, 0x0e, 0x81, 0x70, 0xd9, 0xf4, 0x40, 0xf8, 0x55, 0x58, 0xa6, 0xe1, 0x93, 0xe1, + 0x79, 0x9e, 0x62, 0xa7, 0x68, 0x1a, 0x54, 0x26, 0x3f, 0x30, 0x2b, 0xb0, 0x5c, 0xfb, 0x05, 0x58, + 0xf1, 0xf1, 0x7a, 0x59, 0x3c, 0x43, 0xa4, 0x15, 0x8f, 0x7b, 0x93, 0xa7, 0xf3, 0x7f, 0x8a, 0x8f, + 0x2c, 0x34, 0x42, 0xef, 0x93, 0x80, 0x76, 0xfc, 0x21, 0x01, 0xed, 0xc4, 0xb7, 0x06, 0xda, 0x7e, + 0x54, 0x92, 0x0c, 0xa2, 0x92, 0x6f, 0xe2, 0xa3, 0x6f, 0xe2, 0xc1, 0x66, 0xc5, 0x50, 0x31, 0xc7, + 0x09, 0xf4, 0x99, 0x24, 0x28, 0x3d, 0xa3, 0xc3, 0xd1, 0x00, 0x79, 0x24, 0x5c, 0x5e, 0xec, 0xcc, + 0xf3, 0xd0, 0xe8, 0x41, 0x8c, 0x34, 0xb5, 0x30, 0x87, 0x18, 0x15, 0x48, 0xde, 0xc7, 0x2c, 0xd2, + 0x2d, 0x89, 0xe4, 0x91, 0xf0, 0xd1, 0x4d, 0x46, 0xe3, 0xd7, 0x92, 0xc8, 0x06, 0xe8, 0x26, 0xe4, + 0x69, 0xf1, 0x5f, 0x32, 0x4c, 0x9b, 0x07, 0xa4, 0x40, 0xa2, 0xc3, 0x6a, 0xfc, 0x1b, 0x87, 0x84, + 0xe7, 0xc0, 0xb4, 0xc5, 0x9c, 0xc9, 0x9f, 0x7c, 0xe8, 0x29, 0x1f, 0x00, 0xf0, 0x17, 0x20, 0x4f, + 0x66, 0x6f, 0x9b, 0xb2, 0x82, 0x69, 0x64, 0xc9, 0x8b, 0x23, 0x82, 0x70, 0x0f, 0xd0, 0x78, 0x9c, + 0x44, 0x4d, 0xc8, 0xe0, 0x53, 0xac, 0x3b, 0xe4, 0xb3, 0x25, 0xc3, 0x28, 0x84, 0xe7, 0x45, 0x58, + 0x77, 0xb6, 0xaa, 0xc4, 0xc8, 0xff, 0xfc, 0x72, 0xbd, 0xc2, 0xb8, 0x9f, 0x37, 0xfa, 0x9a, 0x83, + 0xfb, 0xa6, 0x73, 0x26, 0x72, 0x79, 0xe1, 0x6f, 0x09, 0x02, 0x57, 0x03, 0xf1, 0x73, 0xa2, 0x6d, + 0xdd, 0x2d, 0x9f, 0xf0, 0x95, 0x29, 0xe6, 0xb3, 0xf7, 0x45, 0x80, 0x8e, 0x6c, 0x4b, 0x1f, 0xca, + 0xba, 0x83, 0x55, 0x6e, 0xf4, 0x7c, 0x47, 0xb6, 0xdf, 0xa6, 0x04, 0xf2, 0xd5, 0xc9, 0xcf, 0x03, + 0x1b, 0xab, 0x3c, 0xf5, 0xcf, 0x76, 0x64, 0xfb, 0xd8, 0xc6, 0xaa, 0x6f, 0x95, 0xd9, 0x07, 0x5b, + 0x65, 0xd0, 0xc6, 0xb9, 0x90, 0x8d, 0x7d, 0x40, 0x32, 0xef, 0x07, 0x92, 0xa8, 0x06, 0x39, 0xd3, + 0xd2, 0x0c, 0x4b, 0x73, 0xce, 0xe8, 0x87, 0x49, 0x8a, 0xde, 0x18, 0x5d, 0x86, 0x62, 0x1f, 0xf7, + 0x4d, 0xc3, 0xe8, 0x49, 0xcc, 0xd9, 0x14, 0xa8, 0xe8, 0x12, 0x27, 0x36, 0xa8, 0xcf, 0xf9, 0x38, + 0x31, 0x3a, 0x7d, 0xa3, 0x82, 0xc1, 0xc3, 0x35, 0xef, 0xda, 0x04, 0xf3, 0xfa, 0x28, 0x64, 0x11, + 0x21, 0xfb, 0x7a, 0xe3, 0xef, 0xca, 0xc0, 0xc2, 0x4f, 0x69, 0x09, 0x31, 0x98, 0x1b, 0xa1, 0x23, + 0x58, 0xf6, 0x0e, 0xbf, 0x34, 0xa0, 0x4e, 0xc1, 0xdd, 0xce, 0xf3, 0x7a, 0x8f, 0xca, 0x69, 0x90, + 0x6c, 0xa3, 0x77, 0xe0, 0xf1, 0x90, 0x67, 0xf3, 0x54, 0x27, 0xe6, 0x75, 0x70, 0x8f, 0x05, 0x1d, + 0x9c, 0xab, 0x7a, 0x64, 0xac, 0xe4, 0x03, 0x9e, 0xb9, 0x5d, 0x28, 0x05, 0xd3, 0xbc, 0x89, 0x9f, + 0xff, 0x32, 0x14, 0x2d, 0xec, 0xc8, 0x9a, 0x2e, 0x05, 0xea, 0x7e, 0x4b, 0x8c, 0xc8, 0xab, 0x89, + 0x87, 0xf0, 0xd8, 0xc4, 0x74, 0x0f, 0xfd, 0x3f, 0xe4, 0x47, 0x99, 0x62, 0x3c, 0x02, 0x3c, 0x79, + 0xa5, 0xa1, 0x11, 0xaf, 0xf0, 0x87, 0xf8, 0x48, 0x65, 0xb0, 0xd8, 0xd4, 0x80, 0x8c, 0x85, 0xed, + 0x41, 0x8f, 0x95, 0x7f, 0x4a, 0xd7, 0x5f, 0x98, 0x2f, 0x51, 0x24, 0xd4, 0x41, 0xcf, 0x11, 0xb9, + 0xb0, 0x70, 0x0f, 0x32, 0x8c, 0x82, 0x0a, 0x90, 0x3d, 0xde, 0xbf, 0xbd, 0x7f, 0xf0, 0xf6, 0x7e, + 0x25, 0x86, 0x00, 0x32, 0x9b, 0xf5, 0x7a, 0xe3, 0xb0, 0x55, 0x89, 0xa3, 0x3c, 0xa4, 0x37, 0xb7, + 0x0e, 0xc4, 0x56, 0x25, 0x41, 0xc8, 0x62, 0xe3, 0x56, 0xa3, 0xde, 0xaa, 0x24, 0xd1, 0x32, 0x14, + 0xd9, 0xb3, 0xb4, 0x73, 0x20, 0xde, 0xd9, 0x6c, 0x55, 0x52, 0x3e, 0xd2, 0x51, 0x63, 0x7f, 0xbb, + 0x21, 0x56, 0xd2, 0xc2, 0x8b, 0x70, 0x3e, 0x32, 0xb5, 0x1c, 0x55, 0x92, 0xe2, 0xbe, 0x4a, 0x92, + 0xf0, 0x8b, 0x04, 0xd4, 0xa2, 0xf3, 0x45, 0x74, 0x2b, 0xb4, 0xf0, 0xeb, 0x0b, 0x24, 0x9b, 0xa1, + 0xd5, 
0xa3, 0xa7, 0xa0, 0x64, 0xe1, 0x13, 0xec, 0x28, 0x5d, 0x96, 0xbf, 0xb2, 0x80, 0x59, 0x14, + 0x8b, 0x9c, 0x4a, 0x85, 0x6c, 0xc6, 0xf6, 0x3e, 0x56, 0x1c, 0x89, 0xf9, 0x22, 0xb6, 0xe9, 0xf2, + 0x84, 0x8d, 0x50, 0x8f, 0x18, 0x51, 0x78, 0x6f, 0x21, 0x5b, 0xe6, 0x21, 0x2d, 0x36, 0x5a, 0xe2, + 0x3b, 0x95, 0x24, 0x42, 0x50, 0xa2, 0x8f, 0xd2, 0xd1, 0xfe, 0xe6, 0xe1, 0x51, 0xf3, 0x80, 0xd8, + 0x72, 0x05, 0xca, 0xae, 0x2d, 0x5d, 0x62, 0x5a, 0x78, 0x77, 0x14, 0x7f, 0x7c, 0xd5, 0xb4, 0x1d, + 0x28, 0x85, 0xd2, 0xc5, 0xf8, 0x38, 0x9e, 0x19, 0x55, 0xc3, 0xbc, 0x54, 0x50, 0x2c, 0x9e, 0xfa, + 0x87, 0xc2, 0xaf, 0xe3, 0xf0, 0xc4, 0x94, 0x84, 0x12, 0xdd, 0x0e, 0x59, 0xfe, 0xc6, 0x22, 0xe9, + 0x68, 0x78, 0xe3, 0xdd, 0x9c, 0xcb, 0x58, 0x47, 0x7b, 0x9b, 0x47, 0xcd, 0xe0, 0xc6, 0x13, 0xbe, + 0x49, 0xc0, 0xe3, 0x11, 0x29, 0x3f, 0xc9, 0xee, 0xfa, 0x86, 0xaa, 0x9d, 0x68, 0x58, 0x95, 0x78, + 0x1d, 0x38, 0x27, 0x82, 0x4b, 0x6a, 0x0d, 0xd1, 0x4d, 0x00, 0x67, 0x28, 0x59, 0x58, 0x31, 0x2c, + 0xd5, 0x4d, 0x8f, 0xc6, 0x8f, 0x62, 0x6b, 0x28, 0x52, 0x0e, 0x31, 0xef, 0xf0, 0xa7, 0x69, 0x09, + 0x11, 0x7a, 0x8d, 0x2b, 0x25, 0xcb, 0x71, 0xcb, 0xe4, 0x17, 0x27, 0x14, 0xf6, 0xb0, 0x42, 0x14, + 0x53, 0x33, 0x50, 0xc5, 0x94, 0x1f, 0xdd, 0x99, 0xe4, 0x7a, 0xd3, 0xf3, 0xb9, 0xde, 0xc5, 0x9c, + 0x6e, 0xe6, 0xc1, 0x9c, 0xae, 0xf0, 0xbb, 0x80, 0xe5, 0x83, 0x10, 0xe8, 0x1c, 0x64, 0x64, 0x85, + 0x24, 0xfd, 0xdc, 0xe8, 0x7c, 0x34, 0xa5, 0xba, 0x1d, 0x32, 0x5b, 0xf2, 0x61, 0x98, 0x2d, 0xf5, + 0x28, 0xcc, 0x96, 0x7e, 0x40, 0xb3, 0xfd, 0x2c, 0x39, 0x72, 0xe2, 0xc1, 0x82, 0xe0, 0x43, 0xcb, + 0x1c, 0x43, 0xb6, 0x4c, 0x2c, 0x68, 0xcb, 0x89, 0xd1, 0x3f, 0xf9, 0xe8, 0xa2, 0x7f, 0xea, 0x01, + 0xa3, 0xbf, 0x7f, 0x53, 0xa5, 0x83, 0x9b, 0x6a, 0x2c, 0x50, 0x67, 0x26, 0x04, 0xea, 0x77, 0x00, + 0x7c, 0xfd, 0xae, 0x55, 0x48, 0x5b, 0xc6, 0x40, 0x57, 0xe9, 0xce, 0x4d, 0x8b, 0x6c, 0x80, 0x5e, + 0x82, 0x34, 0x71, 0x8f, 0xd1, 0x4e, 0x82, 0xb8, 0x37, 0x5f, 0xf9, 0x94, 0x71, 0x0b, 0x1a, 0xa0, + 0xf1, 0x0a, 0x7e, 0xc4, 0x2b, 0x5e, 0x0f, 0xbe, 0xe2, 0xc9, 0xc8, 0x5e, 0xc0, 0xe4, 0x57, 0x7d, + 0x04, 0x69, 0xba, 0x3d, 0x48, 0xc2, 0x42, 0x5b, 0x67, 0x1c, 0x01, 0x93, 0x67, 0xf4, 0x23, 0x00, + 0xd9, 0x71, 0x2c, 0xad, 0x3d, 0x18, 0xbd, 0x60, 0x7d, 0xf2, 0xf6, 0xda, 0x74, 0xf9, 0xb6, 0x2e, + 0xf0, 0x7d, 0xb6, 0x3a, 0x12, 0xf5, 0xed, 0x35, 0x9f, 0x42, 0x61, 0x1f, 0x4a, 0x41, 0x59, 0x17, + 0xb3, 0xb1, 0x39, 0x04, 0x31, 0x1b, 0x83, 0xe0, 0x1c, 0xb3, 0x79, 0x88, 0x2f, 0xc9, 0x5a, 0xa4, + 0x74, 0x20, 0xfc, 0x3b, 0x0e, 0x4b, 0xfe, 0xdd, 0xf9, 0xbf, 0x06, 0x7b, 0x84, 0x8f, 0xe3, 0x90, + 0xf3, 0x16, 0x1f, 0xd1, 0xa2, 0x1c, 0xd9, 0x2e, 0xe1, 0x6f, 0xc8, 0xb1, 0x9e, 0x67, 0xd2, 0xeb, + 0xa4, 0xbe, 0xea, 0xc5, 0xe9, 0xa8, 0x2a, 0xb5, 0xdf, 0xd2, 0x6e, 0x5f, 0x81, 0xc7, 0xe5, 0x9f, + 0xf3, 0x79, 0x90, 0xa0, 0x87, 0xbe, 0x47, 0x9c, 0xba, 0x57, 0x9b, 0x2f, 0x4d, 0x28, 0xd6, 0xba, + 0xac, 0x1b, 0xad, 0xe1, 0x26, 0xe5, 0x14, 0xb9, 0x04, 0x9f, 0x55, 0xc2, 0xeb, 0xc4, 0xbe, 0x41, + 0xf4, 0x32, 0x9e, 0x60, 0xc8, 0x2f, 0x01, 0x1c, 0xef, 0xdf, 0x39, 0xd8, 0xde, 0xdd, 0xd9, 0x6d, + 0x6c, 0xf3, 0x1c, 0x69, 0x7b, 0xbb, 0xb1, 0x5d, 0x49, 0x10, 0x3e, 0xb1, 0x71, 0xe7, 0xe0, 0x6e, + 0x63, 0xbb, 0x92, 0x14, 0x5e, 0x85, 0xbc, 0xe7, 0x7a, 0x50, 0x15, 0xb2, 0xb2, 0xaa, 0x5a, 0xd8, + 0xb6, 0x79, 0xf2, 0xe8, 0x0e, 0x69, 0x0b, 0xde, 0xf8, 0x90, 0xf7, 0x21, 0x93, 0x22, 0x1b, 0x08, + 0x2a, 0x94, 0x43, 0x7e, 0x0b, 0xbd, 0x0a, 0x59, 0x73, 0xd0, 0x96, 0xdc, 0x4d, 0x1b, 0xba, 0x24, + 0xe7, 0x96, 0x0e, 0x06, 0xed, 0x9e, 0xa6, 0xdc, 0xc6, 0x67, 0xae, 0x99, 0xcc, 0x41, 0xfb, 0x36, + 0xdb, 0xdb, 0xec, 0x2d, 0x09, 
0xff, 0x5b, 0x7e, 0x12, 0x87, 0x9c, 0x7b, 0x56, 0xd1, 0xf7, 0x21, + 0xef, 0xf9, 0x44, 0xef, 0x6a, 0x46, 0xa4, 0x33, 0xe5, 0xfa, 0x47, 0x22, 0xe8, 0x2a, 0x2c, 0xdb, + 0x5a, 0x47, 0x77, 0xdb, 0x39, 0xac, 0x56, 0x97, 0xa0, 0x87, 0xa6, 0xcc, 0x7e, 0xd8, 0x73, 0x0b, + 0x4c, 0xb7, 0x52, 0xb9, 0x64, 0x25, 0x75, 0x2b, 0x95, 0x4b, 0x55, 0xd2, 0xc2, 0x6f, 0xe2, 0x50, + 0x09, 0x3b, 0x8e, 0xef, 0x72, 0x32, 0x24, 0x5d, 0x0e, 0xe5, 0xa3, 0x6c, 0x6f, 0x86, 0xd2, 0xcd, + 0x7f, 0xc5, 0x21, 0xe7, 0x36, 0x8c, 0xd0, 0x8b, 0x3e, 0x17, 0x56, 0x9a, 0xb4, 0x63, 0x39, 0xe3, + 0xa8, 0xfd, 0x1f, 0x5c, 0x52, 0x62, 0xf1, 0x25, 0x45, 0xdd, 0xe1, 0x70, 0x6f, 0xd3, 0xa4, 0x16, + 0xbe, 0x4d, 0xf3, 0x3c, 0x20, 0xc7, 0x70, 0xe4, 0x9e, 0x74, 0x6a, 0x38, 0x9a, 0xde, 0x91, 0xd8, + 0x0e, 0x61, 0xde, 0xa6, 0x42, 0x7f, 0xb9, 0x4b, 0x7f, 0x38, 0xf4, 0x36, 0x8b, 0x07, 0xe7, 0x16, + 0xed, 0xe6, 0x9f, 0x83, 0x0c, 0x47, 0x2c, 0xac, 0x9d, 0xcf, 0x47, 0x5e, 0x8b, 0x31, 0xe5, 0x6b, + 0x31, 0xd6, 0x20, 0xd7, 0xc7, 0x8e, 0x4c, 0x5d, 0x27, 0x8b, 0x96, 0xde, 0xf8, 0xea, 0x2b, 0x50, + 0xf0, 0x5d, 0xac, 0x20, 0xde, 0x74, 0xbf, 0xf1, 0x76, 0x25, 0x56, 0xcb, 0x7e, 0xf2, 0xd9, 0xa5, + 0xe4, 0x3e, 0xfe, 0x90, 0x1c, 0x34, 0xb1, 0x51, 0x6f, 0x36, 0xea, 0xb7, 0x2b, 0xf1, 0x5a, 0xe1, + 0x93, 0xcf, 0x2e, 0x65, 0x45, 0x4c, 0xfb, 0x39, 0x57, 0x9b, 0xb0, 0xe4, 0xff, 0x2a, 0xc1, 0x43, + 0x8d, 0xa0, 0xb4, 0x7d, 0x7c, 0xb8, 0xb7, 0x5b, 0xdf, 0x6c, 0x35, 0xa4, 0xbb, 0x07, 0xad, 0x46, + 0x25, 0x8e, 0x1e, 0x87, 0x95, 0xbd, 0xdd, 0x37, 0x9b, 0x2d, 0xa9, 0xbe, 0xb7, 0xdb, 0xd8, 0x6f, + 0x49, 0x9b, 0xad, 0xd6, 0x66, 0xfd, 0x76, 0x25, 0x71, 0xfd, 0xb7, 0x05, 0x28, 0x6f, 0x6e, 0xd5, + 0x77, 0x09, 0x60, 0xd3, 0x14, 0x99, 0xba, 0x88, 0x3a, 0xa4, 0x68, 0x65, 0x78, 0xea, 0x25, 0xd9, + 0xda, 0xf4, 0x6e, 0x1f, 0xda, 0x81, 0x34, 0x2d, 0x1a, 0xa3, 0xe9, 0xb7, 0x66, 0x6b, 0x33, 0xda, + 0x7f, 0x64, 0x32, 0xf4, 0x14, 0x4d, 0xbd, 0x46, 0x5b, 0x9b, 0xde, 0x0d, 0x44, 0x7b, 0x90, 0x75, + 0x6b, 0x7a, 0xb3, 0x2e, 0xa4, 0xd6, 0x66, 0xb6, 0xd5, 0xc8, 0xd2, 0x58, 0xed, 0x75, 0xfa, 0x0d, + 0xdb, 0xda, 0x8c, 0x3e, 0x21, 0xda, 0x85, 0x0c, 0x2f, 0x7b, 0xcc, 0xb8, 0x5c, 0x5a, 0x9b, 0xd5, + 0x1e, 0x43, 0x22, 0xe4, 0x47, 0x55, 0xed, 0xd9, 0xf7, 0x86, 0x6b, 0x73, 0xb4, 0x40, 0xd1, 0x3d, + 0x28, 0x06, 0x4b, 0x29, 0xf3, 0x5d, 0x60, 0xad, 0xcd, 0xd9, 0x88, 0x23, 0xfa, 0x83, 0x75, 0x95, + 0xf9, 0x2e, 0xb4, 0xd6, 0xe6, 0xec, 0xcb, 0xa1, 0xf7, 0x61, 0x79, 0xbc, 0xee, 0x31, 0xff, 0xfd, + 0xd6, 0xda, 0x02, 0x9d, 0x3a, 0xd4, 0x07, 0x34, 0xa1, 0x5e, 0xb2, 0xc0, 0x75, 0xd7, 0xda, 0x22, + 0x8d, 0x3b, 0xa4, 0x42, 0x39, 0x0c, 0xbf, 0xe7, 0xbd, 0xfe, 0x5a, 0x9b, 0xbb, 0x89, 0xc7, 0xde, + 0x12, 0x84, 0x9a, 0xf3, 0x5e, 0x87, 0xad, 0xcd, 0xdd, 0xd3, 0x43, 0xc7, 0x00, 0xbe, 0x82, 0xca, + 0x1c, 0xd7, 0x63, 0x6b, 0xf3, 0x74, 0xf7, 0x90, 0x09, 0x2b, 0x93, 0x0a, 0x29, 0x8b, 0xdc, 0x96, + 0xad, 0x2d, 0xd4, 0xf4, 0x23, 0xfb, 0x39, 0x08, 0x31, 0xe7, 0xbb, 0x3d, 0x5b, 0x9b, 0xb3, 0xfb, + 0xb7, 0xd5, 0xf8, 0xfc, 0xab, 0xb5, 0xf8, 0x17, 0x5f, 0xad, 0xc5, 0xff, 0xf1, 0xd5, 0x5a, 0xfc, + 0xd3, 0xaf, 0xd7, 0x62, 0x5f, 0x7c, 0xbd, 0x16, 0xfb, 0xeb, 0xd7, 0x6b, 0xb1, 0x1f, 0x3c, 0xd7, + 0xd1, 0x9c, 0xee, 0xa0, 0xbd, 0xa1, 0x18, 0xfd, 0x6b, 0xfe, 0x3f, 0x52, 0x4c, 0xfa, 0x73, 0x47, + 0x3b, 0x43, 0xa3, 0xe9, 0x8d, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf9, 0x85, 0x76, 0xba, 0xfc, + 0x31, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
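[Editor's annotation — not part of the patch.] The byte array replaced in the hunk above is the gzip-compressed, serialized FileDescriptorProto that protoc-gen-gogo embeds in every generated file; regenerating types.proto with the new proposal and vote-extension messages simply swaps one opaque blob for another. The regenerated comment records the compressed size (3395 bytes), and, assuming the dump is complete, the final four bytes (0xfc, 0x31, 0x00, 0x00) are the gzip ISIZE trailer: an uncompressed descriptor of 0x31fc = 12,796 bytes, up from 0x2e12 = 11,794 bytes at the end of the removed array. The blob is unreadable in a diff, but it can be inspected. A minimal sketch, assuming access to the generated variable (gogo names it fileDescriptor_<hash>; the exact name is not visible in this excerpt) and using gogo's own descriptor types:

package abciinspect

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/gogo/protobuf/proto"
	descriptorpb "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// InspectDescriptor inflates a gzipped FileDescriptorProto blob, such as the
// one regenerated above, and prints a summary of what it describes.
func InspectDescriptor(gz []byte) error {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return err
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr) // ~12.8 KB once inflated, per the gzip trailer
	if err != nil {
		return err
	}
	fd := new(descriptorpb.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		return err
	}
	// For this file the name should be "tendermint/abci/types.proto",
	// matching the Metadata string in the service descriptor further down.
	fmt.Printf("%s: %d messages, %d services\n",
		fd.GetName(), len(fd.GetMessageType()), len(fd.GetService()))
	return nil
}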
@@ -4061,9 +4347,9 @@ type ABCIApplicationClient interface { ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) - FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) } type aBCIApplicationClient struct { @@ -4191,15 +4477,6 @@ func (c *aBCIApplicationClient) ProcessProposal(ctx context.Context, in *Request return out, nil } -func (c *aBCIApplicationClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { - out := new(ResponseFinalizeBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/FinalizeBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *aBCIApplicationClient) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) { out := new(ResponseExtendVote) err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ExtendVote", in, out, opts...) @@ -4218,6 +4495,15 @@ func (c *aBCIApplicationClient) VerifyVoteExtension(ctx context.Context, in *Req return out, nil } +func (c *aBCIApplicationClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { + out := new(ResponseFinalizeBlock) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/FinalizeBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ABCIApplicationServer is the server API for ABCIApplication service. type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) @@ -4233,9 +4519,9 @@ type ABCIApplicationServer interface { ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) - FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
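[Editor's annotation — not part of the patch.] The interface hunks above only move FinalizeBlock below VerifyVoteExtension so that the generated method order follows the rpc order in tendermint/abci/types.proto. This is not a wire-level change: gRPC dispatches on the full method string ("/tendermint.abci.ABCIApplication/FinalizeBlock", unchanged in the handlers below), not on a method's position in an interface or service descriptor. The embedding noted in the trailing comment is what keeps third-party servers compiling across regenerations like this one. A minimal sketch of a server that relies on it — the import path and the []*types.ExecTxResult field type are assumptions read off this generated file, and the listen address is only illustrative:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	types "github.com/tendermint/tendermint/abci/types"
)

// app overrides only FinalizeBlock; every other method falls through to the
// embedded UnimplementedABCIApplicationServer and returns codes.Unimplemented.
type app struct {
	types.UnimplementedABCIApplicationServer
}

func (a *app) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	// One ExecTxResult per transaction in the decided block; a real
	// application would execute req.Txs against its state machine here.
	res := &types.ResponseFinalizeBlock{
		TxResults: make([]*types.ExecTxResult, len(req.Txs)),
	}
	for i := range res.TxResults {
		res.TxResults[i] = &types.ExecTxResult{}
	}
	return res, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:26658")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	types.RegisterABCIApplicationServer(srv, &app{})
	log.Fatal(srv.Serve(lis))
}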
@@ -4281,15 +4567,15 @@ func (*UnimplementedABCIApplicationServer) PrepareProposal(ctx context.Context, func (*UnimplementedABCIApplicationServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") } -func (*UnimplementedABCIApplicationServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") -} func (*UnimplementedABCIApplicationServer) ExtendVote(ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") } func (*UnimplementedABCIApplicationServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") } +func (*UnimplementedABCIApplicationServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") +} func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) @@ -4529,56 +4815,56 @@ func _ABCIApplication_ProcessProposal_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } -func _ABCIApplication_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestFinalizeBlock) +func _ABCIApplication_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestExtendVote) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ABCIApplicationServer).FinalizeBlock(ctx, in) + return srv.(ABCIApplicationServer).ExtendVote(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/FinalizeBlock", + FullMethod: "/tendermint.abci.ABCIApplication/ExtendVote", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) + return srv.(ABCIApplicationServer).ExtendVote(ctx, req.(*RequestExtendVote)) } return interceptor(ctx, in, info, handler) } -func _ABCIApplication_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestExtendVote) +func _ABCIApplication_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVerifyVoteExtension) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ABCIApplicationServer).ExtendVote(ctx, in) + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/ExtendVote", + FullMethod: "/tendermint.abci.ABCIApplication/VerifyVoteExtension", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).ExtendVote(ctx, 
req.(*RequestExtendVote)) + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) } return interceptor(ctx, in, info, handler) } -func _ABCIApplication_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestVerifyVoteExtension) +func _ABCIApplication_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFinalizeBlock) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, in) + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/VerifyVoteExtension", + FullMethod: "/tendermint.abci.ABCIApplication/FinalizeBlock", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) } return interceptor(ctx, in, info, handler) } @@ -4639,10 +4925,6 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "ProcessProposal", Handler: _ABCIApplication_ProcessProposal_Handler, }, - { - MethodName: "FinalizeBlock", - Handler: _ABCIApplication_FinalizeBlock_Handler, - }, { MethodName: "ExtendVote", Handler: _ABCIApplication_ExtendVote_Handler, @@ -4651,6 +4933,10 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "VerifyVoteExtension", Handler: _ABCIApplication_VerifyVoteExtension_Handler, }, + { + MethodName: "FinalizeBlock", + Handler: _ABCIApplication_FinalizeBlock_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "tendermint/abci/types.proto", @@ -5125,70 +5411,6 @@ func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestFlush) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5324,12 
+5546,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n23, err23 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err23 != nil { - return 0, err23 + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err21 != nil { + return 0, err21 } - i -= n23 - i = encodeVarintTypes(dAtA, i, uint64(n23)) + i -= n21 + i = encodeVarintTypes(dAtA, i, uint64(n21)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -5387,7 +5609,7 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { +func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5397,27 +5619,61 @@ func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { +func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5427,25 +5683,32 @@ func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x10 + } + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { +func (m 
*RequestDeliverTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5455,21 +5718,16 @@ func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } if len(m.Tx) > 0 { i -= len(m.Tx) copy(dAtA[i:], m.Tx) @@ -5480,6 +5738,34 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5648,57 +5934,6 @@ func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Votes) > 0 { - for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.BlockDataSize != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockDataSize)) - i-- - dAtA[i] = 0x10 - } - if len(m.BlockData) > 0 { - for iNdEx := len(m.BlockData) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.BlockData[iNdEx]) - copy(dAtA[i:], m.BlockData[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockData[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5769,7 +6004,7 @@ func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5779,16 +6014,21 @@ func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.MaxTxBytes != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxBytes)) + i-- + dAtA[i] = 0x30 + } if len(m.ByzantineValidators) > 0 { for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { { @@ -5804,7 +6044,7 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) } } { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LocalLastCommit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5842,7 +6082,7 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5852,12 +6092,12 @@ func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5873,11 +6113,11 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a } } { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5885,7 +6125,16 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } { size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -5895,27 +6144,86 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 - } + dAtA[i] = 0x12 if len(m.Hash) > 0 { i -= len(m.Hash) copy(dAtA[i:], m.Hash) i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int 
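// Editor's annotation, not a line of the generated patch: like every
// MarshalToSizedBuffer in this file, the body that follows fills dAtA from
// the end — i starts at len(dAtA) and is decremented — so the
// highest-numbered field is written first. Each byte literal assigned to
// dAtA[i] is a protobuf key, (field_number << 3) | wire_type; for the new
// RequestFinalizeBlock below that means 0x2a = field 5 (ByzantineValidators),
// 0x22 = field 4 (DecidedLastCommit), 0x1a = field 3 (Txs), 0x12 = field 2
// (Header), and 0x0a = field 1 (Hash), all wire type 2 (length-delimited),
// each with its length written via encodeVarintTypes.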
+ _ = l + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.DecidedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 if len(m.Txs) > 0 { for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Txs[iNdEx]) copy(dAtA[i:], m.Txs[iNdEx]) i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + } + } + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -7099,20 +7407,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA55 := make([]byte, len(m.RefetchChunks)*10) - var j54 int + dAtA57 := make([]byte, len(m.RefetchChunks)*10) + var j56 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA55[j54] = uint8(uint64(num)&0x7f | 0x80) + dAtA57[j56] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j54++ + j56++ } - dAtA55[j54] = uint8(num) - j54++ + dAtA57[j56] = uint8(num) + j56++ } - i -= j54 - copy(dAtA[i:], dAtA55[:j54]) - i = encodeVarintTypes(dAtA, i, uint64(j54)) + i -= j56 + copy(dAtA[i:], dAtA57[:j56]) + i = encodeVarintTypes(dAtA, i, uint64(j56)) i-- dAtA[i] = 0x12 } @@ -7124,7 +7432,7 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { +func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7134,29 +7442,32 @@ func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.BlockData) > 0 { - for iNdEx := len(m.BlockData) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.BlockData[iNdEx]) - copy(dAtA[i:], m.BlockData[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockData[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.VoteExtension != nil { + { + size, err := m.VoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { +func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7166,32 +7477,25 @@ func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.VoteExtension != nil { - { - size, err := m.VoteExtension.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { +func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7201,18 +7505,84 @@ func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Result != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a + } + if len(m.TxRecords) > 0 { + for iNdEx := len(m.TxRecords) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.ModifiedTx { + i-- + if m.ModifiedTx { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- dAtA[i] = 0x8 } @@ -7319,19 +7689,17 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + 
copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x2a } if m.ConsensusParamUpdates != nil { { @@ -7343,7 +7711,7 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } if len(m.ValidatorUpdates) > 0 { for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { @@ -7356,13 +7724,27 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x1a + } + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 } } - if len(m.Txs) > 0 { - for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Txs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7376,7 +7758,7 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { +func (m *CommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7386,12 +7768,12 @@ func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -7418,7 +7800,7 @@ func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Event) Marshal() (dAtA []byte, err error) { +func (m *ExtendedCommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7428,20 +7810,20 @@ func (m *Event) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { +func (m *ExtendedCommitInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExtendedCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7452,9 +7834,51 @@ func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 } } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa @@ -7536,10 +7960,10 @@ func (m *ExecTxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x42 } - if len(m.TxEvents) > 0 { - for iNdEx := len(m.TxEvents) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.TxEvents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7639,6 +8063,41 @@ func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TxRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Validator) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7755,6 +8214,56 @@ func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExtendedVoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedVoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedVoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x1a + } + if m.SignedLastBlock { + i-- + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Evidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7780,12 +8289,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n62, err62 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err62 != nil { - return 0, err62 + n66, err66 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err66 != nil { + return 0, err66 } - i -= n62 - i = encodeVarintTypes(dAtA, i, uint64(n62)) + i -= n66 + i = encodeVarintTypes(dAtA, i, uint64(n66)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -8127,29 +8636,6 @@ func (m *RequestEcho) Size() (n int) { return n } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *RequestFlush) Size() (n int) { if m == nil { return 0 @@ -8237,32 +8723,46 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestDeliverTx) Size() (n int) { +func (m *RequestBeginBlock) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Tx) + l = len(m.Hash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.LastCommitInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } -func (m *RequestEndBlock) Size() (n int) { +func (m *RequestCheckTx) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) } return n } -func (m *RequestCheckTx) Size() (n int) { +func (m *RequestDeliverTx) Size() (n int) { if m == nil { return 0 } @@ -8272,8 +8772,17 @@ func (m *RequestCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) + return n +} + +func (m *RequestEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) } return n } @@ -8351,31 +8860,20 @@ func (m *RequestApplySnapshotChunk) Size() (n int) { return n } -func (m *RequestPrepareProposal) Size() (n int) { +func (m *RequestExtendVote) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.BlockData) > 0 { - for _, b := range m.BlockData { - l = len(b) - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.BlockDataSize != 0 { - n += 1 + sovTypes(uint64(m.BlockDataSize)) - } - if len(m.Votes) > 0 { - for _, e := range m.Votes { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *RequestExtendVote) Size() (n int) { +func (m *RequestVerifyVoteExtension) Size() (n int) { if m == nil { return 0 } @@ -8388,16 +8886,35 @@ func (m *RequestExtendVote) Size() (n int) { return n } -func (m *RequestVerifyVoteExtension) Size() (n int) { +func (m *RequestPrepareProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Vote != nil { - l = m.Vote.Size() + l = len(m.Hash) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if 
len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.LocalLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.MaxTxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxTxBytes)) + } return n } @@ -8419,7 +8936,7 @@ func (m *RequestProcessProposal) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - l = m.LastCommitInfo.Size() + l = m.ProposedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) if len(m.ByzantineValidators) > 0 { for _, e := range m.ByzantineValidators { @@ -8436,22 +8953,19 @@ func (m *RequestFinalizeBlock) Size() (n int) { } var l int _ = l - if len(m.Txs) > 0 { - for _, b := range m.Txs { - l = len(b) - n += 1 + l + sovTypes(uint64(l)) - } - } l = len(m.Hash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } l = m.Header.Size() n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.DecidedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) if len(m.ByzantineValidators) > 0 { for _, e := range m.ByzantineValidators { @@ -9053,21 +9567,6 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { return n } -func (m *ResponsePrepareProposal) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.BlockData) > 0 { - for _, b := range m.BlockData { - l = len(b) - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseExtendVote) Size() (n int) { if m == nil { return 0 @@ -9093,16 +9592,22 @@ func (m *ResponseVerifyVoteExtension) Size() (n int) { return n } -func (m *ResponseProcessProposal) Size() (n int) { +func (m *ResponsePrepareProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Accept { + if m.ModifiedTx { n += 2 } - l = len(m.AppHash) + if len(m.TxRecords) > 0 { + for _, e := range m.TxRecords { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } @@ -9125,14 +9630,21 @@ func (m *ResponseProcessProposal) Size() (n int) { return n } -func (m *ResponseFinalizeBlock) Size() (n int) { +func (m *ResponseProcessProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Txs) > 0 { - for _, e := range m.Txs { + if m.Accept { + n += 2 + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9147,16 +9659,66 @@ func (m *ResponseFinalizeBlock) Size() (n int) { l = m.ConsensusParamUpdates.Size() n += 1 + l + sovTypes(uint64(l)) } + return n +} + +func (m *ResponseFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RetainHeight != 0 { + n += 1 + 
sovTypes(uint64(m.RetainHeight)) + } + return n +} + +func (m *CommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } -func (m *LastCommitInfo) Size() (n int) { +func (m *ExtendedCommitInfo) Size() (n int) { if m == nil { return 0 } @@ -9240,8 +9802,8 @@ func (m *ExecTxResult) Size() (n int) { if m.GasUsed != 0 { n += 1 + sovTypes(uint64(m.GasUsed)) } - if len(m.TxEvents) > 0 { - for _, e := range m.TxEvents { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9274,6 +9836,22 @@ func (m *TxResult) Size() (n int) { return n } +func (m *TxRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovTypes(uint64(m.Action)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + func (m *Validator) Size() (n int) { if m == nil { return 0 @@ -9318,6 +9896,24 @@ func (m *VoteInfo) Size() (n int) { return n } +func (m *ExtendedVoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + func (m *Evidence) Size() (n int) { if m == nil { return 0 @@ -10169,7 +10765,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { +func (m *RequestFlush) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10192,17 +10788,67 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var byteLen int + var stringLen uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10212,31 +10858,29 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) } - var msglen int + m.BlockVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10246,30 +10890,16 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.BlockVersion |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) } - var msglen int + m.P2PVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10279,30 +10909,16 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.P2PVersion |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10312,25 +10928,23 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AbciVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10353,7 +10967,7 @@ func (m *RequestBeginBlock) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestFlush) Unmarshal(dAtA []byte) error { +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10376,217 +10990,15 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) - } - m.BlockVersion = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockVersion |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) - } - m.P2PVersion = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.P2PVersion |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if 
b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AbciVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInitChain) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10679,7 +11091,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParams == nil { - m.ConsensusParams = &types.ConsensusParams{} + m.ConsensusParams = &types1.ConsensusParams{} } if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10948,7 +11360,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10971,15 +11383,15 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11006,66 +11418,200 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
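Every Unmarshal method in this file repeats the same base-128 varint loop: seven payload bits per byte, least-significant group first, with the high bit as a continuation flag. A standalone sketch of the pattern, plus the size formula behind the generated sovTypes helper (uvarint and sizeOfUvarint are illustrative names, not part of the file):

package main

import (
	"errors"
	"fmt"
	"math/bits"
)

// uvarint mirrors the inline decode loops above: each byte contributes its
// low 7 bits, and a clear high bit (b < 0x80) terminates the value.
func uvarint(buf []byte) (x uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits") // cf. ErrIntOverflowTypes
		}
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF") // cf. io.ErrUnexpectedEOF
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, n, nil
		}
	}
}

// sizeOfUvarint matches the generated sovTypes formula: the byte count
// uvarint needs for x (the x|1 makes zero occupy one byte).
func sizeOfUvarint(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	x, n, _ := uvarint([]byte{0xAC, 0x02}) // 300 encodes as 0xAC 0x02
	fmt.Println(x, n, sizeOfUvarint(300))  // 300 2 2
}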
+ if m.Hash == nil { + m.Hash = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthTypes } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { break } } fieldNum 
:= int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11075,7 +11621,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Type |= CheckTxType(b&0x7F) << shift if b < 0x80 { break } @@ -11101,7 +11647,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11124,10 +11670,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11164,11 +11710,61 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Type = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11178,7 +11774,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11666,7 +12262,7 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { +func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11689,17 +12285,17 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11709,75 +12305,26 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockData = append(m.BlockData, make([]byte, postIndex-iNdEx)) - copy(m.BlockData[len(m.BlockData)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockDataSize", wireType) - } - m.BlockDataSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockDataSize |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if m.Vote == nil { + m.Vote = &types1.Vote{} } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Votes = append(m.Votes, &types.Vote{}) - if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -11801,7 +12348,7 @@ func (m *RequestPrepareProposal) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { +func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11824,10 +12371,10 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") + return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11860,7 +12407,7 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Vote == nil { - m.Vote = &types.Vote{} + m.Vote = &types1.Vote{} } if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11887,7 +12434,7 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11910,15 +12457,49 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") + return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
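The bytes-field decode rewritten here copies with append(m.Hash[:0], dAtA[iNdEx:postIndex]...) instead of keeping a sub-slice of dAtA, so the decoded message owns its data rather than aliasing a transport buffer that may be reused. A small illustration of the difference (names are illustrative only):

package main

import "fmt"

func main() {
	buf := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for the wire buffer dAtA

	aliased := buf[0:2]                        // shares backing storage with buf
	copied := append([]byte(nil), buf[0:2]...) // equivalent to the generated copy when the field starts empty

	buf[0] = 0x00                        // the transport reuses its buffer
	fmt.Printf("aliased: %x\n", aliased) // 00ad: silently corrupted
	fmt.Printf("copied:  %x\n", copied)  // dead: unaffected
}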
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11945,13 +12526,128 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Vote == nil { - m.Vote = &types.Vote{} + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTxBytes", wireType) + } + m.MaxTxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12103,7 +12799,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12130,7 
+12826,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12220,7 +12916,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12247,14 +12943,16 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) - copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12264,50 +12962,30 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
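Pieced together from the Size and Unmarshal hunks around this point, the reworked proposal and finalize requests now hand the application the full proposed or decided block. Approximately, as Go structs (a sketch inferred from the tags decoded above, not copied from the .proto; Header, Evidence, CommitInfo, and ExtendedCommitInfo are the message types generated elsewhere in this file):

type RequestPrepareProposal struct {
	Hash                []byte             // 1
	Header              Header             // 2
	Txs                 [][]byte           // 3
	LocalLastCommit     ExtendedCommitInfo // 4: votes with extensions
	ByzantineValidators []Evidence         // 5
	MaxTxBytes          int64              // 6: replaces BlockDataSize
}

type RequestFinalizeBlock struct {
	Hash                []byte     // 1 (Txs moved to 3)
	Header              Header     // 2 (standalone Height dropped)
	Txs                 [][]byte   // 3
	DecidedLastCommit   CommitInfo // 4: renamed from LastCommitInfo
	ByzantineValidators []Evidence // 5
}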
- if m.Hash == nil { - m.Hash = []byte{} + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12317,28 +12995,27 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DecidedLastCommit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12365,11 +13042,11 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DecidedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } @@ -13633,7 +14310,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParams == nil { - m.ConsensusParams = &types.ConsensusParams{} + m.ConsensusParams = &types1.ConsensusParams{} } if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14837,7 +15514,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types.ConsensusParams{} + m.ConsensusParamUpdates = &types1.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -15415,7 +16092,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { +func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15438,17 +16115,17 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15458,23 +16135,27 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockData = append(m.BlockData, make([]byte, postIndex-iNdEx)) - copy(m.BlockData[len(m.BlockData)-1], dAtA[iNdEx:postIndex]) + if m.VoteExtension == nil { + m.VoteExtension = &types1.VoteExtension{} + } + if err := m.VoteExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15497,7 +16178,7 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { +func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15520,17 +16201,17 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var msglen int + m.Result = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15540,28 +16221,11 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Result |= ResponseVerifyVoteExtension_Result(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VoteExtension == nil { - m.VoteExtension = &types.VoteExtension{} - } - if err := m.VoteExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15583,7 +16247,7 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { +func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15606,17 +16270,17 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") + return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ModifiedTx", wireType) } - m.Result = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15626,11 +16290,184 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= ResponseVerifyVoteExtension_Result(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.ModifiedTx = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxRecords", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxRecords = append(m.TxRecords, &TxRecord{}) + if err := m.TxRecords[len(m.TxRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, &ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15833,7 +16670,7 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types.ConsensusParams{} + m.ConsensusParamUpdates = &types1.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -15891,7 +16728,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15918,12 +16755,46 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Txs = append(m.Txs, &ResponseDeliverTx{}) - if err := m.Txs[len(m.Txs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) } @@ -15957,7 +16828,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } @@ -15987,15 +16858,137 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types.ConsensusParams{} + m.ConsensusParamUpdates = &types1.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 4: + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
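With DeliverTx folded into FinalizeBlock, the response now carries one ExecTxResult per transaction plus fields that previously lived in other responses; the field order this Unmarshal decodes is, approximately (a sketch from the tags above; AppHash and RetainHeight appear to take over what ResponseCommit returned):

type ResponseFinalizeBlock struct {
	Events                []Event            // 1: block-level events (was 4)
	TxResults             []*ExecTxResult    // 2: replaces Txs []*ResponseDeliverTx
	ValidatorUpdates      []*ValidatorUpdate // 3 (was 2)
	ConsensusParamUpdates *ConsensusParams   // 4 (was 3)
	AppHash               []byte             // 5: new
	RetainHeight          int64              // 6: new
}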
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16022,8 +17015,8 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16048,7 +17041,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { +func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16071,10 +17064,10 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LastCommitInfo: wiretype end group for non-group") + return fmt.Errorf("proto: ExtendedCommitInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LastCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExtendedCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -16125,7 +17118,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Votes = append(m.Votes, VoteInfo{}) + m.Votes = append(m.Votes, ExtendedVoteInfo{}) if err := 
m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -16587,7 +17580,7 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { } case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxEvents", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16614,8 +17607,8 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TxEvents = append(m.TxEvents, Event{}) - if err := m.TxEvents[len(m.TxEvents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16827,6 +17820,109 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { } return nil } +func (m *TxRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= TxRecord_TxAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Validator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -17135,6 +18231,143 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedVoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedVoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedLastBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SignedLastBlock = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) 
+ if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Evidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/abci/types/types_test.go b/abci/types/types_test.go new file mode 100644 index 000000000..f79a24454 --- /dev/null +++ b/abci/types/types_test.go @@ -0,0 +1,74 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" +) + +func TestHashAndProveResults(t *testing.T) { + trs := []*abci.ExecTxResult{ + // Note, these tests rely on the first two entries being in this order. + {Code: 0, Data: nil}, + {Code: 0, Data: []byte{}}, + + {Code: 0, Data: []byte("one")}, + {Code: 14, Data: nil}, + {Code: 14, Data: []byte("foo")}, + {Code: 14, Data: []byte("bar")}, + } + + // Nil and []byte{} should produce the same bytes + bz0, err := trs[0].Marshal() + require.NoError(t, err) + bz1, err := trs[1].Marshal() + require.NoError(t, err) + require.Equal(t, bz0, bz1) + + // Make sure that we can get a root hash from results and verify proofs. + rs, err := abci.MarshalTxResults(trs) + require.NoError(t, err) + root := merkle.HashFromByteSlices(rs) + assert.NotEmpty(t, root) + + _, proofs := merkle.ProofsFromByteSlices(rs) + for i, tr := range trs { + bz, err := tr.Marshal() + require.NoError(t, err) + + valid := proofs[i].Verify(root, bz) + assert.NoError(t, valid, "%d", i) + } +} + +func TestHashDeterministicFieldsOnly(t *testing.T) { + tr1 := abci.ExecTxResult{ + Code: 1, + Data: []byte("transaction"), + Log: "nondeterministic data: abc", + Info: "nondeterministic data: abc", + GasWanted: 1000, + GasUsed: 1000, + Events: []abci.Event{}, + Codespace: "nondeterministic.data.abc", + } + tr2 := abci.ExecTxResult{ + Code: 1, + Data: []byte("transaction"), + Log: "nondeterministic data: def", + Info: "nondeterministic data: def", + GasWanted: 1000, + GasUsed: 1000, + Events: []abci.Event{}, + Codespace: "nondeterministic.data.def", + } + r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1}) + require.NoError(t, err) + r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2}) + require.NoError(t, err) + require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2)) +} diff --git a/buf.gen.yaml b/buf.gen.yaml index 335e25241..d972360bb 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -1,14 +1,9 @@ -# The version of the generation template (required). -# The only currently-valid value is v1beta1. -version: v1beta1 - -# The plugins to run. +version: v1 plugins: - # The name of the plugin. - name: gogofaster - # The directory where the generated proto output will be written. - # The directory is relative to where the generation tool was run. - out: proto - # Set options to assign import paths to the well-known types - # and to enable service generation. 
- opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative + out: ./proto/ + opt: + - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types + - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration + - plugins=grpc + - paths=source_relative diff --git a/buf.work.yaml b/buf.work.yaml new file mode 100644 index 000000000..1878b341b --- /dev/null +++ b/buf.work.yaml @@ -0,0 +1,3 @@ +version: v1 +directories: + - proto diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 5ecbba617..6cec32738 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { Height: b.Height, Index: uint32(i), Tx: b.Data.Txs[i], - Result: *(r.FinalizeBlock.Txs[i]), + Result: *(r.FinalizeBlock.TxResults[i]), } _ = batch.Add(&tr) diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index c525d4baa..826fa0233 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) { On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once(). On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil) - dtx := abcitypes.ResponseDeliverTx{} + dtx := abcitypes.ExecTxResult{} abciResp := &prototmstate.ABCIResponses{ FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - Txs: []*abcitypes.ResponseDeliverTx{&dtx}, + TxResults: []*abcitypes.ExecTxResult{&dtx}, }, } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index ce0798e45..8236b2bd4 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -2,6 +2,7 @@ package commands import ( "os" + "path/filepath" "github.com/spf13/cobra" @@ -31,6 +32,20 @@ func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command return cmd } +// MakeResetStateCommand constructs a command that removes the database of +// the specified Tendermint core instance. +func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var keyType string + + return &cobra.Command{ + Use: "reset-state", + Short: "Remove all the data and WAL", + RunE: func(cmd *cobra.Command, args []string) error { + return resetState(conf.DBDir(), logger, keyType) + }, + } +} + func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command { var keyType string @@ -55,18 +70,76 @@ func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *c // it's only suitable for testnets. // resetAll removes address book files plus all data, and resets the privValdiator data. -// Exported so other CLI tools can use it. 
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { logger.Error("error removing all blockchain history", "dir", dbDir, "err", err) } - // recreate the dbDir since the privVal state needs to live there + + return resetFilePV(privValKeyFile, privValStateFile, logger, keyType) +} + +// resetState removes address book files plus all databases. +func resetState(dbDir string, logger log.Logger, keyType string) error { + blockdb := filepath.Join(dbDir, "blockstore.db") + state := filepath.Join(dbDir, "state.db") + wal := filepath.Join(dbDir, "cs.wal") + evidence := filepath.Join(dbDir, "evidence.db") + txIndex := filepath.Join(dbDir, "tx_index.db") + peerstore := filepath.Join(dbDir, "peerstore.db") + + if tmos.FileExists(blockdb) { + if err := os.RemoveAll(blockdb); err == nil { + logger.Info("Removed all blockstore.db", "dir", blockdb) + } else { + logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err) + } + } + + if tmos.FileExists(state) { + if err := os.RemoveAll(state); err == nil { + logger.Info("Removed all state.db", "dir", state) + } else { + logger.Error("error removing all state.db", "dir", state, "err", err) + } + } + + if tmos.FileExists(wal) { + if err := os.RemoveAll(wal); err == nil { + logger.Info("Removed all cs.wal", "dir", wal) + } else { + logger.Error("error removing all cs.wal", "dir", wal, "err", err) + } + } + + if tmos.FileExists(evidence) { + if err := os.RemoveAll(evidence); err == nil { + logger.Info("Removed all evidence.db", "dir", evidence) + } else { + logger.Error("error removing all evidence.db", "dir", evidence, "err", err) + } + } + + if tmos.FileExists(txIndex) { + if err := os.RemoveAll(txIndex); err == nil { + logger.Info("Removed tx_index.db", "dir", txIndex) + } else { + logger.Error("error removing tx_index.db", "dir", txIndex, "err", err) + } + } + + if tmos.FileExists(peerstore) { + if err := os.RemoveAll(peerstore); err == nil { + logger.Info("Removed peerstore.db", "dir", peerstore) + } else { + logger.Error("error removing peerstore.db", "dir", peerstore, "err", err) + } + } if err := tmos.EnsureDir(dbDir, 0700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } - return resetFilePV(privValKeyFile, privValStateFile, logger, keyType) + return nil } func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 90146fecd..84f7b386e 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -34,6 +34,7 @@ func main() { commands.MakeReplayCommand(conf, logger), commands.MakeReplayConsoleCommand(conf, logger), commands.MakeResetAllCommand(conf, logger), + commands.MakeResetStateCommand(conf, logger), commands.MakeResetPrivateValidatorCommand(conf, logger), commands.MakeShowValidatorCommand(conf, logger), commands.MakeTestnetFilesCommand(conf, logger), diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 27b58721b..8f916427d 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -27,17 +27,17 @@ Usage: abci-cli [command] Available Commands: - batch Run a batch of abci commands against an application - check_tx Validate a tx - commit Commit the application state and return the Merkle root hash - console Start an interactive abci console for multiple commands - deliver_tx Deliver a new tx to the application 
- kvstore ABCI demo example - echo Have the application echo a message - help Help about any command - info Get some info about the application - query Query the application state - set_option Set an options on the application + batch Run a batch of abci commands against an application + check_tx Validate a tx + commit Commit the application state and return the Merkle root hash + console Start an interactive abci console for multiple commands + finalize_block Send a set of transactions to the application + kvstore ABCI demo example + echo Have the application echo a message + help Help about any command + info Get some info about the application + query Query the application state + set_option Set an options on the application Flags: --abci string socket or grpc (default "socket") @@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command. The `abci-cli` tool lets us send ABCI messages to our application, to help build and debug them. -The most important messages are `deliver_tx`, `check_tx`, and `commit`, +The most important messages are `finalize_block`, `check_tx`, and `commit`, but there are others for convenience, configuration, and information purposes. @@ -173,7 +173,7 @@ Try running these commands: -> code: OK -> data.hex: 0x0000000000000000 -> deliver_tx "abc" +> finalize_block "abc" -> code: OK > info @@ -192,7 +192,7 @@ Try running these commands: -> value: abc -> value.hex: 616263 -> deliver_tx "def=xyz" +> finalize_block "def=xyz" -> code: OK > commit @@ -207,8 +207,8 @@ Try running these commands: -> value.hex: 78797A ``` -Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if -we do `deliver_tx "abc=efg"` it will store `(abc, efg)`. +Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if +we do `finalize_block "abc=efg"` it will store `(abc, efg)`. Similarly, you could put the commands in a file and run `abci-cli --verbose batch < myfile`. diff --git a/docs/architecture/README.md b/docs/architecture/README.md index a29e69db0..00faaa8b8 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -85,6 +85,11 @@ Note the context/background should be written in the present tense. - [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md) - [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md) - [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md) +- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md) + +### Deprecated + +None ### Rejected @@ -92,7 +97,6 @@ Note the context/background should be written in the present tense. - [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md) - [ADR-058: Event-Hashing](./adr-058-event-hashing.md) - ### Proposed - [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md) diff --git a/docs/architecture/adr-075-rpc-subscription.md b/docs/architecture/adr-075-rpc-subscription.md index f2c4dcf12..1ca48e712 100644 --- a/docs/architecture/adr-075-rpc-subscription.md +++ b/docs/architecture/adr-075-rpc-subscription.md @@ -2,6 +2,7 @@ ## Changelog +- 01-Mar-2022: Update long-polling interface (@creachadair). - 10-Feb-2022: Updates to reflect implementation. - 26-Jan-2022: Marked accepted. - 22-Jan-2022: Updated and expanded (@creachadair). @@ -347,8 +348,8 @@ limit. The `wait_time` parameter is used to effect polling. If `before` is empty and no items are available, the server will wait for up to `wait_time` for matching -items to arrive at the head of the log. 
If `wait_time` is zero, the server will -return whatever eligible items are available immediately. +items to arrive at the head of the log. If `wait_time` is zero or negative, the +server will wait for a default (positive) interval. If `before` non-empty, `wait_time` is ignored: new results are only added to the head of the log, so there is no need to wait. This allows the client to diff --git a/docs/architecture/adr-081-protobuf-mgmt.md b/docs/architecture/adr-081-protobuf-mgmt.md new file mode 100644 index 000000000..1199cff1b --- /dev/null +++ b/docs/architecture/adr-081-protobuf-mgmt.md @@ -0,0 +1,201 @@ +# ADR 081: Protocol Buffers Management + +## Changelog + +- 2022-02-28: First draft + +## Status + +Accepted + +[Tracking issue](https://github.com/tendermint/tendermint/issues/8121) + +## Context + +At present, we manage the [Protocol Buffers] schema files ("protos") that define +our wire-level data formats within the Tendermint repository itself (see the +[`proto`](../../proto/) directory). Recently, we have been making use of [Buf], +both locally and in CI, in order to generate Go stubs, and lint and check +`.proto` files for breaking changes. + +The version of Buf used at the time of this decision was `v1beta1`, and it was +discussed in [\#7975] and in weekly calls as to whether we should upgrade to +`v1` and harmonize our approach with that used by the Cosmos SDK. The team +managing the Cosmos SDK was primarily interested in having our protos versioned +and easily accessible from the [Buf] registry. + +The three main sets of stakeholders for the `.proto` files and their needs, as +currently understood, are as follows. + +1. Tendermint needs Go code generated from `.proto` files. +2. Consumers of Tendermint's `.proto` files, specifically projects that want to + interoperate with Tendermint and need to generate code for their own + programming language, want to be able to access these files in a reliable and + efficient way. +3. The Tendermint Core team wants to provide stable interfaces that are as easy + as possible to maintain, on which consumers can depend, and to be able to + notify those consumers promptly when those interfaces change. To this end, we + want to: + 1. Prevent any breaking changes from being introduced in minor/patch releases + of Tendermint. Only major version updates should be able to contain + breaking interface changes. + 2. Prevent generated code from diverging from the Protobuf schema files. + +There was also discussion surrounding the notion of automated documentation +generation and hosting, but it is not clear at this time whether this would be +that valuable to any of our stakeholders. What will, of course, be valuable at +minimum would be better documentation (in comments) of the `.proto` files +themselves. + +## Alternative Approaches + +### Meeting stakeholders' needs + +1. Go stub generation from protos. We could use: + 1. [Buf]. This approach has been rather cumbersome up to this point, and it + is not clear what Buf really provides beyond that which `protoc` provides + to justify the additional complexity in configuring Buf for stub + generation. + 2. [protoc] - the Protocol Buffers compiler. +2. Notification of breaking changes: + 1. Buf in CI for all pull requests to *release* branches only (and not on + `master`). + 2. Buf in CI on every pull request to every branch (this was the case at the + time of this decision, and the team decided that the signal-to-noise ratio + for this approach was too low to be of value). +3. `.proto` linting: + 1. 
Buf in CI on every pull request +4. `.proto` formatting: + 1. [clang-format] locally and a [clang-format GitHub Action] in CI to check + that files are formatted properly on every pull request. +5. Sharing of `.proto` files in a versioned, reliable manner: + 1. Consumers could simply clone the Tendermint repository, check out a + specific commit, tag or branch and manually copy out all of the `.proto` + files they need. This requires no effort from the Tendermint Core team and + will continue to be an option for consumers. The drawback of this approach + is that it requires manual coding/scripting to implement and is brittle in + the face of bigger changes. + 2. Uploading our `.proto` files to Buf's registry on every release. This is + by far the most seamless for consumers of our `.proto` files, but requires + the dependency on Buf. This has the additional benefit that the Buf + registry will automatically [generate and host + documentation][buf-docs-gen] for these protos. + 3. We could create a process that, upon release, creates a `.zip` file + containing our `.proto` files. + +### Popular alternatives to Buf + +[Prototool] was not considered as it appears deprecated, and the ecosystem seems +to be converging on Buf at this time. + +### Tooling complexity + +The more tools we have in our build/CI processes, the more complex and fragile +repository/CI management becomes, and the longer it takes to onboard new team +members. Maintainability is a core concern here. + +### Buf sustainability and costs + +One of the primary considerations regarding the usage of Buf is whether, for +example, access to its registry will eventually become a +paid-for/subscription-based service and whether this is valuable enough for us +and the ecosystem to pay for such a service. At this time, it appears as though +Buf will never charge for hosting open source projects' protos. + +Another consideration was Buf's sustainability as a project - what happens when +their resources run out? Will there be a strong and broad enough open source +community to continue maintaining it? + +### Local Buf usage options + +Local usage of Buf (i.e. not in CI) can be accomplished in two ways: + +1. Installing the relevant tools individually. +2. By way of its [Docker image][buf-docker]. + +Local installation of Buf requires developers to manually keep their toolchains +up-to-date. The Docker option comes with a number of complexities, including +how the file system permissions of code generated by a Docker container differ +between platforms (e.g. on Linux, Buf-generated code ends up being owned by +`root`). + +The trouble with the Docker-based approach is that we make use of the +[gogoprotobuf] plugin for `protoc`. Continuing to use the Docker-based approach +to using Buf will mean that we will have to continue building our own custom +Docker image with embedded gogoprotobuf. + +Along these lines, we could eventually consider coming up with a [Nix]- or +[redo]-based approach to developer tooling to ensure tooling consistency across +the team and for anyone who wants to be able to contribute to Tendermint. + +## Decision + +1. We will adopt Buf for now for proto generation, linting, breakage checking + and its registry (mainly in CI, with optional usage locally). +2. Failing CI when checking for breaking changes in `.proto` files will only + happen when performing minor/patch releases. +3. Local tooling will be favored over Docker-based tooling. + +## Detailed Design + +We currently aim to: + +1. 
Update to Buf `v1` to facilitate linting, breakage checking and uploading to + the Buf registry. +2. Configure CI appropriately for proto management: + 1. Uploading protos to the Buf registry on every release (e.g. the + [approach][cosmos-sdk-buf-registry-ci] used by the Cosmos SDK). + 2. Linting on every pull request (e.g. the + [approach][cosmos-sdk-buf-linting-ci] used by the Cosmos SDK). The linter + passing should be considered a requirement for accepting PRs. + 3. Checking for breaking changes in minor/patch version releases and failing + CI accordingly - see [\#8003]. + 4. Add [clang-format GitHub Action] to check `.proto` file formatting. Format + checking should be considered a requirement for accepting PRs. +3. Update the Tendermint [`Makefile`](../../Makefile) to primarily facilitate + local Protobuf stub generation, linting, formatting and breaking change + checking. More specifically: + 1. This includes removing the dependency on Docker and introducing the + dependency on local toolchain installation. CI-based equivalents, where + relevant, will rely on specific GitHub Actions instead of the Makefile. + 2. Go code generation will rely on `protoc` directly. + +## Consequences + +### Positive + +- We will still offer Go stub generation, proto linting and breakage checking. +- Breakage checking will only happen on minor/patch releases to increase the + signal-to-noise ratio in CI. +- Versioned protos will be made available via Buf's registry upon every release. + +### Negative + +- Developers/contributors will need to install the relevant Protocol + Buffers-related tooling (Buf, gogoprotobuf, clang-format) locally in order to + build, lint, format and check `.proto` files for breaking changes. + +### Neutral + +## References + +- [Protocol Buffers] +- [Buf] +- [\#7975] +- [protoc] - The Protocol Buffers compiler + +[Protocol Buffers]: https://developers.google.com/protocol-buffers +[Buf]: https://buf.build/ +[\#7975]: https://github.com/tendermint/tendermint/pull/7975 +[protoc]: https://github.com/protocolbuffers/protobuf +[clang-format]: https://clang.llvm.org/docs/ClangFormat.html +[clang-format GitHub Action]: https://github.com/marketplace/actions/clang-format-github-action +[buf-docker]: https://hub.docker.com/r/bufbuild/buf +[cosmos-sdk-buf-registry-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto-registry.yml +[cosmos-sdk-buf-linting-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto.yml#L15 +[\#8003]: https://github.com/tendermint/tendermint/issues/8003 +[Nix]: https://nixos.org/ +[gogoprotobuf]: https://github.com/gogo/protobuf +[Prototool]: https://github.com/uber/prototool +[buf-docs-gen]: https://docs.buf.build/bsr/documentation +[redo]: https://redo.readthedocs.io/en/latest/ diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index 00e553437..27225fd70 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -6,12 +6,30 @@ ## Status -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" -> once it is agreed upon. Once the ADR has been implemented mark the ADR as -> "implemented". If a later ADR changes or reverses a decision, it may be marked -> as "deprecated" or "superseded" with a reference to its replacement. +> An architecture decision is considered "proposed" when a PR containing the ADR +> is submitted. 
> When merged, an ADR must have a status associated with it, which +> must be one of: "Accepted", "Rejected", "Deprecated" or "Superseded". +> +> An accepted ADR's implementation status must be tracked via a tracking issue, +> milestone or project board (only one of these is necessary). For example: +> +> Accepted +> +> [Tracking issue](https://github.com/tendermint/tendermint/issues/123) +> [Milestone](https://github.com/tendermint/tendermint/milestones/123) +> [Project board](https://github.com/orgs/tendermint/projects/123) +> +> Rejected ADRs are captured as a record of recommendations that we specifically +> do not (and possibly never) want to implement. The ADR itself must, for +> posterity, include reasoning as to why it was rejected. +> +> If an ADR is deprecated, simply write "Deprecated" in this section. If an ADR +> is superseded by one or more other ADRs, provide a local reference to those +> ADRs, e.g.: +> +> Superseded by [ADR 123](./adr-123.md) -{Deprecated|Declined|Accepted|Implemented} +Accepted | Rejected | Deprecated | Superseded by ## Context diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index 2e1f03341..ebd21d998 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -594,7 +594,7 @@ This section will cover settings within the p2p section of the `config.toml`. - `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on. - `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id. -Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config paramters being deprecated and/or replaced. +Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced. We will cover the new and deprecated parameters below. ### New Parameters diff --git a/docs/rfc/rfc-015-abci++-tx-mutation.md b/docs/rfc/rfc-015-abci++-tx-mutation.md new file mode 100644 index 000000000..3c7854ed3 --- /dev/null +++ b/docs/rfc/rfc-015-abci++-tx-mutation.md @@ -0,0 +1,261 @@ +# RFC 015: ABCI++ TX Mutation + +## Changelog + +- 23-Feb-2022: Initial draft (@williambanfield). +- 28-Feb-2022: Revised draft (@williambanfield). + +## Abstract + +A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions +in the proposed block. This scheme required the proposer to construct new transactions +and mark these new transactions as replacing other removed transactions. The specification +was ambiguous as to how the replacement may be communicated to peer nodes. +This RFC discusses issues with this mechanism and possible solutions. + +## Background + +### What is the proposed change? + +A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing +transactions in a proposed block.
To replace a transaction, the application running +`ProcessProposal` could mark a transaction as replaced by other application-supplied +transactions by returning a new transaction marked with the `ADDED` flag, setting +the `new_hashes` field of the removed transaction to contain the list of transaction hashes +that replace it. In that previous specification for ABCI++, the full use of the +`new_hashes` field is left somewhat ambiguous. At present, these hashes are not +gossiped and are not eventually included in the block to signal replacement to +other nodes. The specification did indicate that the transactions specified in +the `new_hashes` field will be removed from the mempool, but it's not clear how +peer nodes will learn about them. + +### What systems would be affected by adding transaction replacement? + +The 'transaction' is a central building block of a Tendermint blockchain, so adding +a mechanism for transaction replacement would require changes to many aspects of Tendermint. + +The following is a rough list of the functionality that this mechanism would affect: + +#### Transaction indexing + +Tendermint's indexer stores transactions and transaction results using the hash of the executed +transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value. + +To allow transaction replacement, the replaced transactions would need to be stored as well in the +indexer, likely as a mapping of original transaction to list of transaction hashes that replaced +the original transaction. + +#### Transaction inclusion proofs + +The result of a transaction query includes a Merkle proof of the existence of the +transaction in the blockchain. This [proof is built][inclusion-proof] as a Merkle tree +of the hashes of all of the transactions in the block where the queried transaction was executed. + +To allow transaction replacement, these proofs would need to be updated to prove +that a replaced transaction was included by replacement in the block. + +#### RPC-based transaction query parameters and results + +Tendermint's RPC allows clients to retrieve information about transactions via the +`/tx_search` and `/tx` RPC endpoints. + +RPC query results containing replaced transactions would need to be updated to include +information on replaced transactions, either by returning results for all of the replaced +transactions, or by including a response with just the hashes of the replaced transactions, +which clients could proceed to query individually. + +#### Mempool transaction removal + +Additional logic would need to be added to the Tendermint mempool to clear out replaced +transactions after each block is executed. Tendermint currently removes executed transactions +from the mempool, so this would be a pretty straightforward change. + +## Discussion + +### What value may be added to Tendermint by introducing transaction replacement? + +Transaction replacement would enable applications to aggregate or disaggregate transactions. + +For aggregation, a set of transactions that all perform related work, such as transferring +tokens between the same two accounts, could be replaced with a single transaction, +i.e. one that transfers a single sum from one account to the other. +Applications that make frequent use of aggregation may be able to achieve a higher throughput. +Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing +more client-submitted transactions to be executed per block.
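To make the aggregation idea concrete, here is a minimal, hypothetical sketch of how an application might collapse several transfers between the same two accounts into one summed transfer. The `Transfer` type and `aggregateTransfers` helper are invented for illustration and are not part of the RFC or of any Tendermint API:

```go
package main

import "fmt"

// Transfer is a hypothetical application-level transaction that moves
// tokens between two accounts.
type Transfer struct {
	From, To string
	Amount   int64
}

// aggregateTransfers collapses all transfers sharing the same (From, To)
// pair into a single summed transfer, preserving the first-seen order of
// the pairs. This mirrors the aggregation example in the text above.
func aggregateTransfers(txs []Transfer) []Transfer {
	sums := make(map[[2]string]int64)
	var order [][2]string
	for _, tx := range txs {
		key := [2]string{tx.From, tx.To}
		if _, seen := sums[key]; !seen {
			order = append(order, key)
		}
		sums[key] += tx.Amount
	}
	out := make([]Transfer, 0, len(order))
	for _, key := range order {
		out = append(out, Transfer{From: key[0], To: key[1], Amount: sums[key]})
	}
	return out
}

func main() {
	txs := []Transfer{
		{From: "alice", To: "bob", Amount: 5},
		{From: "alice", To: "bob", Amount: 7},
		{From: "carol", To: "dave", Amount: 3},
	}
	// The two alice->bob transfers collapse into one transfer of 12.
	fmt.Println(aggregateTransfers(txs))
}
```

An application performing this during `PrepareProposal` would still need some way to tell the rest of the network which original transactions the summed one replaced, which is exactly the gap the rest of this RFC examines.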
+ +For disaggregation, a very complex transaction could be split into multiple smaller transactions. +This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts +of a multi-part transaction. + +### Drawbacks to transaction replacement + +Transaction replacement would require updating and shimming many of the places that +Tendermint records and exposes information about executed transactions. While +systems within Tendermint could be updated to account for transaction replacement, +such a system would leave new issues and rough edges. + +#### No way of guaranteeing correct replacement + +If a user issues a transaction to the network and the transaction is replaced, the +user has no guarantee that the replacement was correct. For example, suppose a set of users issue +transactions A, B, and C and they are all aggregated into a new transaction, D. +There is nothing guaranteeing that D was constructed correctly from the inputs. +The only way for users to ensure D is correct would be if D contained all of the +information of its constituent transactions, in which case, nothing is really gained by the replacement. + +#### Replacement transactions not signed by submitter + +Abstractly, Tendermint simply views transactions as a ball of bytes and therefore +should be fine with replacing one with another. However, many applications require +that transactions submitted to the chain be signed by some private key to authenticate +and authorize the transaction. Replaced transactions could not be signed by the +submitter, only by the application node. Therefore, any use of transaction replacement +could not carry authorization from the submitter and would need to grant +application-submitted transactions the power to perform application logic on behalf +of a user without their consent. + +Granting this power to application-submitted transactions would be very dangerous +and therefore might not be of much value to application developers. +Transaction replacement might only be really safe in the case of application-submitted +transactions or for transactions that require no authorization. For such transactions, +it's not quite clear what the utility of replacement is: the application can already +generate any transactions that it wants. The fact that such a transaction was a replacement +is not particularly relevant to participants in the chain since the application is +merely replacing its own transactions. + +#### New vector for censorship + +Depending on the implementation, transaction replacement may allow a node to signal +to the rest of the chain that some transaction should no longer be considered for execution. +Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated. +Malicious nodes will be granted a new vector for censoring transactions. +There is no guarantee that a replaced transaction is actually executed at all. +A malicious node could censor a transaction by simply listing it as replaced. +Honest nodes seeing the replacement would flush the transaction from their mempool +and not execute or propose it in later blocks. + +### Transaction tracking implementations + +This section discusses possible ways to flesh out the implementation of transaction replacement. +Specifically, this section proposes a few alternative ways that Tendermint blockchains could +track and store transaction replacements.
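To ground the censorship concern above, here is a deliberately tiny, hypothetical sketch of the flush behavior an honest node would perform on seeing a replacement, and that a malicious proposer could exploit by listing transactions as replaced without ever executing them. The `mempool` type is invented and is not Tendermint's mempool interface:

```go
package main

import "fmt"

// mempool is a toy stand-in for a node's mempool, keyed by transaction hash.
type mempool map[string][]byte

// flushReplaced drops every transaction listed as replaced, whether or not a
// replacement was ever executed; this is the behavior a malicious proposer
// could abuse to censor transactions.
func (mp mempool) flushReplaced(replacedHashes []string) {
	for _, h := range replacedHashes {
		delete(mp, h)
	}
}

func main() {
	mp := mempool{
		"aa": []byte("tx A"),
		"bb": []byte("tx B"),
		"cc": []byte("tx C"),
	}
	// A replacement message claims txs aa and bb were aggregated elsewhere.
	mp.flushReplaced([]string{"aa", "bb"})
	fmt.Println(len(mp)) // 1: only cc remains; aa and bb are gone for good
}
```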
+ +#### Include transaction replacements in the block + +One option to track transaction replacement is to include information on the +transaction replacement within the block. An additional structure may be added +to the block of the following form: + +```proto +message Block { +... + repeated Replacement replacements = 5; +} + +message Replacement { + bytes included_tx_key = 1; + repeated bytes replaced_txs_keys = 2; +} +``` + +Applications executing `PrepareProposal` would return the list of replacements and +Tendermint would include an encoding of these replacements in the block that is gossiped +and committed. + +Tendermint's transaction indexing would include a new mapping for each replaced transaction +key to the committed transaction. +Transaction inclusion proofs would be updated to include these additional new transaction +keys in the Merkle tree and queries for transaction hashes that were replaced would return +information indicating that the transaction was replaced along with the hash of the +transaction that replaced it. + +Block validation of gossiped blocks would be updated to check that each +`included_tx_key` matches the hash of some transaction in the proposed block. + +Implementing the changes described in this section would allow Tendermint to gossip +and index transaction replacements as part of block propagation. These changes would +still require the application to certify that the replacements were valid. This +validation may be performed in one of two ways: + +1. **Applications optimistically trust that the proposer performed a legitimate replacement.** + +In this validation scheme, applications would not verify that the substitution +is valid during consensus and instead simply trust that the proposer is correct. +This would have the drawback of allowing a malicious proposer to remove transactions +it did not want executed. + +2. **Applications completely validate transaction replacement.** + +In this validation scheme, applications that allow replacement would check that +each listed replaced transaction was correctly reflected in the replacement transaction. +In order to perform such validation, the node would need to have the replaced transactions +locally. This could be accomplished in one of a few ways: by querying the mempool, +by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions +in the block. Replacement validation via mempool querying would require the node +to have received all of the replaced transactions in the mempool, which is far from +guaranteed. Adding an additional gossip channel would make gossiping replaced transactions +a requirement for consensus to proceed, since all nodes would need to receive all replacement +messages before considering a block valid. Finally, including replaced transactions in +the block seems to obviate any benefit gained from performing a transaction replacement +since the replaced transaction and the original transactions would now both appear in the block. + +#### Application defined transaction replacement + +An additional option for allowing transaction replacement is to leave it entirely as a responsibility +of the application. The `PrepareProposal` ABCI++ call allows for applications to add +new transactions to a proposed block. Applications that wish to implement a transaction +replacement mechanism would be free to do so without the newly defined `new_hashes` field.
+Applications wishing to implement transaction replacement would add the aggregated +transactions in the `PrepareProposal` response, and include one additional bookkeeping +transaction that lists all of the replacements, with a similar scheme to the `new_hashes` +field described in ABCI++. This new bookkeeping transaction could be used by the +application to determine which transactions to clear from the mempool in future calls +to `CheckTx`. + +The meaning of any transaction in the block is completely opaque to Tendermint, +so applications performing this style of replacement would not be able to have the replacement +reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing +and the `/tx` endpoint. + +#### Application defined Tx Keys + +Tendermint currently uses cryptographic hashes, SHA256, as a key for each transaction. +As noted in the section on systems that would require changing, this key is used +to identify the transaction in the mempool, in the indexer, and within the RPC system. + +An alternative approach to allowing `ProcessProposal` to specify a set of transaction +replacements would instead be to allow the application to specify an additional key or set +of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set +would be included in the block and therefore gossiped during block propagation. +Additional RPC endpoints could be exposed to query by the application-defined keys. + +Applications wishing to implement replacement would leverage this new field by providing the +replaced transaction hashes as the `secondary_keys` and checking their validity during +`ProcessProposal`. During `RecheckTx`, the application would then be responsible for +clearing out transactions that matched the `secondary_keys`. + +It is worth noting that something like this would be possible without `secondary_keys`. +An application wishing to implement a system like this one could define a replacement +transaction, as discussed in the section on application-defined transaction replacement, +and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should +be indexed within Tendermint's ABCI event indexing. + +### Complexity to value-add tradeoff + +It is worth remarking that adding a system like this may introduce a decent amount +of new complexity into Tendermint. An approach that leaves much of the replacement +logic to Tendermint would require altering the core transaction indexing and querying +data. In many of the cases listed, a system for transaction replacement is possible +without explicitly defining it as part of `PrepareProposal`. Since applications +can now add transactions during `PrepareProposal`, they can and should leverage this +functionality to include additional bookkeeping transactions in the block. It may +be worth encouraging applications to discover new and interesting ways to leverage this +power instead of immediately solving the problem for them.
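As a concrete illustration of the bookkeeping-transaction idea under "Application defined transaction replacement" above, the following hypothetical sketch shows how an application could encode its own replacement record; the `ReplacementRecord` type and `buildBookkeepingTx` helper are invented for illustration and are not part of any ABCI++ interface:

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// ReplacementRecord is a hypothetical payload an application could append
// to a block as its own bookkeeping transaction during PrepareProposal,
// recording which submitted transactions an aggregate transaction replaced.
type ReplacementRecord struct {
	IncludedTx  [32]byte   `json:"included_tx"`
	ReplacedTxs [][32]byte `json:"replaced_txs"`
}

// buildBookkeepingTx encodes a record mapping the aggregate transaction to
// the hashes of the transactions it replaced. In later CheckTx calls the
// application could decode such records to decide what to clear from the
// mempool.
func buildBookkeepingTx(aggregate []byte, replaced [][]byte) ([]byte, error) {
	rec := ReplacementRecord{IncludedTx: sha256.Sum256(aggregate)}
	for _, tx := range replaced {
		rec.ReplacedTxs = append(rec.ReplacedTxs, sha256.Sum256(tx))
	}
	return json.Marshal(rec)
}

func main() {
	bk, err := buildBookkeepingTx(
		[]byte("summed transfer"),
		[][]byte{[]byte("tx A"), []byte("tx B")},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("bookkeeping tx: %s\n", bk)
}
```

Because the record is just another opaque transaction to Tendermint, this approach keeps all replacement semantics inside the application, at the cost of being invisible to Tendermint's indexer and `/tx` endpoint, as noted above.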
+ +### References + +[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67 +[tx-search-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267 +[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21 +[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90 +[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168 diff --git a/docs/tendermint-core/consensus/proposer-based-timestamps.md b/docs/tendermint-core/consensus/proposer-based-timestamps.md new file mode 100644 index 000000000..7f98f10d6 --- /dev/null +++ b/docs/tendermint-core/consensus/proposer-based-timestamps.md @@ -0,0 +1,95 @@ +--- +order: 3 +--- + +# PBTS + + This document provides an overview of the Proposer-Based Timestamp (PBTS) + algorithm added to Tendermint in the v0.36 release. It outlines the core + functionality as well as the parameters and constraints of this algorithm. + +## Algorithm Overview + +The PBTS algorithm defines a way for a Tendermint blockchain to create block +timestamps that are within a reasonable bound of the clocks of the validators on +the network. This replaces the original BFTTime algorithm for timestamp +assignment that relied on the timestamps included in precommit messages. + +## Algorithm Parameters + +The functionality of the PBTS algorithm is governed by two parameters within +Tendermint. These two parameters are [consensus +parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291), +meaning they are configured by the ABCI application and are expected to be the +same across all nodes on the network. + +### `Precision` + +The `Precision` parameter configures the acceptable upper bound on clock drift +among all of the nodes on a Tendermint network. Any two nodes on a Tendermint +network are expected to have clocks that differ by at most `Precision` +milliseconds at any given instant. + +### `MessageDelay` + +The `MessageDelay` parameter configures the acceptable upper bound for +transmitting a `Proposal` message from the proposer to _all_ of the validators +on the network. + +Networks should choose as small a value for `MessageDelay` as is practical, +provided it is large enough that messages can reach all participants with high +probability given the number of participants and latency of their connections. + +## Algorithm Concepts + +### Block timestamps + +Each block produced by the Tendermint consensus engine contains a timestamp. +The timestamp produced in each block is a meaningful representation of time that is +useful for the protocols and applications built on top of Tendermint. + +The following protocols and application features require a reliable source of time: + +* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. +* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
+* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 + days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). +* IBC packets can use either a [timestamp or a height to time out packet + delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements). + +### Proposer Selects a Block Timestamp + +When the proposer node creates a new block proposal, the node reads the time +from its local clock and uses this reading as the timestamp for the proposed +block. + +### Timeliness + +When each validator on a Tendermint network receives a proposed block, it +performs a series of checks to ensure that the block can be considered valid as +a candidate to be the next block in the chain. + +The PBTS algorithm performs a validity check on the timestamp of proposed +blocks. When a validator receives a proposal, it ensures that the timestamp in +the proposal is within a bound of the validator's local clock. Specifically, the +algorithm checks that the timestamp is no more than `Precision` greater than the +node's local clock and no less than `Precision` + `MessageDelay` behind the +node's local clock. This creates a range of acceptable timestamps around the +node's local time. If the timestamp is within this range, the PBTS algorithm +considers the block **timely**. If a block is not **timely**, the node will +issue a `nil` `prevote` for this block, signaling to the rest of the network +that the node does not consider the block to be valid. + +### Clock Synchronization + +The PBTS algorithm requires that the clocks of the validators on a Tendermint network +be within `Precision` of each other. In practice, this means that validators +should periodically synchronize to a reliable NTP server. Validators that drift +too far away from the rest of the network will no longer propose blocks with +valid timestamps. Additionally, they will not consider the timestamps of blocks +proposed by their peers to be valid either. + +## See Also + +* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md) + contains all of the details of the algorithm.
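The timeliness rule described above reduces to a simple interval test. The sketch below is illustrative only; the function name and signature are invented, and the authoritative logic lives in Tendermint's consensus code and the PBTS specification:

```go
package main

import (
	"fmt"
	"time"
)

// isTimely reports whether a proposed block timestamp ts falls inside the
// window [localTime - (precision + messageDelay), localTime + precision],
// which is the PBTS timeliness rule described above.
func isTimely(ts, localTime time.Time, precision, messageDelay time.Duration) bool {
	lower := localTime.Add(-(precision + messageDelay))
	upper := localTime.Add(precision)
	return !ts.Before(lower) && !ts.After(upper)
}

func main() {
	now := time.Now()
	precision := 500 * time.Millisecond
	messageDelay := 2 * time.Second

	fmt.Println(isTimely(now.Add(-time.Second), now, precision, messageDelay)) // true: inside the window
	fmt.Println(isTimely(now.Add(time.Second), now, precision, messageDelay))  // false: more than Precision ahead
}
```

A validator whose check returns false prevotes `nil`, so a proposer with a badly drifting clock cannot get its blocks accepted.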
diff --git a/docs/versions b/docs/versions index 90a8a4cf1..70754facc 100644 --- a/docs/versions +++ b/docs/versions @@ -1,3 +1,4 @@ +master master v0.33.x v0.33 v0.34.x v0.34 v0.35.x v0.35 diff --git a/go.mod b/go.mod index 136dd3af3..80db11417 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/rs/cors v1.8.2 github.com/rs/zerolog v1.26.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.3.0 + github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.10.1 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.6 @@ -34,11 +34,17 @@ require ( golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.44.0 + google.golang.org/grpc v1.45.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) +require ( + github.com/creachadair/atomicfile v0.2.4 + github.com/google/go-cmp v0.5.7 + gotest.tools v2.2.0+incompatible +) + require ( 4d63.com/gochecknoglobals v0.1.0 // indirect github.com/Antonboom/errname v0.1.5 // indirect @@ -67,7 +73,6 @@ require ( github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect github.com/containerd/continuity v0.2.1 // indirect github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48 // indirect - github.com/creachadair/atomicfile v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingajkin/go-header v0.4.2 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect @@ -107,7 +112,6 @@ require ( github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/btree v1.0.0 // indirect - github.com/google/go-cmp v0.5.7 // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect diff --git a/go.sum b/go.sum index cd3e0e4f5..5c1ebecc9 100644 --- a/go.sum +++ b/go.sum @@ -940,8 +940,9 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1625,8 +1626,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0 
h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index f3d4a9e0a..4c905c660 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -168,7 +168,7 @@ func (pool *BlockPool) removeTimedoutPeers() { for _, peer := range pool.peers { // check if peer timed out if !peer.didTimeout && peer.numPending > 0 { - curRate := peer.recvMonitor.Status().CurRate + curRate := peer.recvMonitor.CurrentTransferRate() // curRate can be 0 on start if curRate != 0 && curRate < minRecvRate { err := errors.New("peer is not sending us data fast enough") diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index f4d69b8b0..cf1a10623 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -70,6 +70,8 @@ type Reactor struct { // immutable initialState sm.State + // store + stateStore sm.Store blockExec *sm.BlockExecutor store *store.BlockStore @@ -101,7 +103,7 @@ type Reactor struct { func NewReactor( ctx context.Context, logger log.Logger, - state sm.State, + stateStore sm.Store, blockExec *sm.BlockExecutor, store *store.BlockStore, consReactor consensusReactor, @@ -111,19 +113,6 @@ func NewReactor( metrics *consensus.Metrics, eventBus *eventbus.EventBus, ) (*Reactor, error) { - - if state.LastBlockHeight != store.Height() { - return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) - } - - startHeight := store.Height() + 1 - if startHeight == 1 { - startHeight = state.InitialHeight - } - - requestsCh := make(chan BlockRequest, maxTotalRequesters) - errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. - blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor()) if err != nil { return nil, err @@ -131,20 +120,16 @@ func NewReactor( r := &Reactor{ logger: logger, - initialState: state, + stateStore: stateStore, blockExec: blockExec, store: store, - pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh), consReactor: consReactor, blockSync: newAtomicBool(blockSync), - requestsCh: requestsCh, - errorsCh: errorsCh, blockSyncCh: blockSyncCh, blockSyncOutBridgeCh: make(chan p2p.Envelope), peerUpdates: peerUpdates, metrics: metrics, eventBus: eventBus, - syncStartTime: time.Time{}, } r.BaseService = *service.NewBaseService(logger, "BlockSync", r) @@ -159,6 +144,27 @@ func NewReactor( // If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. 
func (r *Reactor) OnStart(ctx context.Context) error { + state, err := r.stateStore.Load() + if err != nil { + return err + } + r.initialState = state + + if state.LastBlockHeight != r.store.Height() { + return fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, r.store.Height()) + } + + startHeight := r.store.Height() + 1 + if startHeight == 1 { + startHeight = state.InitialHeight + } + + requestsCh := make(chan BlockRequest, maxTotalRequesters) + errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. + r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh) + r.requestsCh = requestsCh + r.errorsCh = errorsCh + if r.blockSync.IsSet() { if err := r.pool.Start(ctx); err != nil { return err diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 68656fbc3..14264e040 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -14,7 +15,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/proxy" @@ -33,7 +35,7 @@ type reactorTestSuite struct { nodes []types.NodeID reactors map[types.NodeID]*Reactor - app map[types.NodeID]proxy.AppConns + app map[types.NodeID]abciclient.Client blockSyncChannels map[types.NodeID]*p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate @@ -64,7 +66,7 @@ func setup( network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), - app: make(map[types.NodeID]proxy.AppConns, numNodes), + app: make(map[types.NodeID]abciclient.Client, numNodes), blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), @@ -109,7 +111,7 @@ func (rts *reactorTestSuite) addNode( logger := log.TestingLogger() rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics()) + rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) require.NoError(t, rts.app[nodeID].Start(ctx)) blockDB := dbm.NewMemDB() @@ -120,14 +122,29 @@ func (rts *reactorTestSuite) addNode( state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) require.NoError(t, stateStore.Save(state)) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + + eventbus := eventbus.NewDefault(logger) + require.NoError(t, eventbus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), - rts.app[nodeID].Consensus(), - mock.Mempool{}, + 
rts.app[nodeID], + mp, sm.EmptyEvidencePool{}, blockStore, + eventbus, ) for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -154,8 +171,7 @@ func (rts *reactorTestSuite) addNode( ) } - thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit) - require.NoError(t, err) + thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} @@ -176,7 +192,7 @@ func (rts *reactorTestSuite) addNode( rts.reactors[nodeID], err = NewReactor( ctx, rts.logger.With("nodeID", nodeID), - state.Copy(), + stateStore, blockExec, blockStore, nil, diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 33e1dbf63..221baf3e1 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -82,37 +82,33 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { log.TestingLogger().With("module", "mempool"), thisConfig.Mempool, proxyAppConnMem, - 0, ) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } + eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + // Make a full instance of the evidence pool evidenceDB := dbm.NewMemDB() - evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) // Make State - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus) + cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus) + require.NoError(t, err) // set private validator pv := privVals[i] cs.SetPrivValidator(ctx, pv) - eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) - err = eventBus.Start(ctx) - require.NoError(t, err) - cs.SetEventBus(eventBus) - evpool.SetEventBus(eventBus) - cs.SetTimeoutTicker(tickerFunc()) states[i] = cs }() } - rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nValidators, states, 512) // buffer must be large enough to not deadlock var bzNodeID types.NodeID @@ -180,7 +176,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { require.NotNil(t, lazyNodeState.privValidator) var commit *types.Commit - var votes []*types.Vote switch { case lazyNodeState.Height == lazyNodeState.state.InitialHeight: // We're creating a proposal for the first block. @@ -189,7 +184,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { case lazyNodeState.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = lazyNodeState.LastCommit.MakeCommit() - votes = lazyNodeState.LastCommit.GetVotes() default: // This shouldn't happen. 
lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return @@ -206,9 +200,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } proposerAddr := lazyNodeState.privValidatorPubKey.Address() - block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock( - ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, votes, - ) + block, err := lazyNodeState.blockExec.CreateProposalBlock( + ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, nil) + require.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, @@ -238,8 +233,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } for _, reactor := range rts.reactors { - state := reactor.state.GetState() - reactor.SwitchToConsensus(ctx, state, false) + reactor.SwitchToConsensus(ctx, reactor.state.GetState(), false) } // Evidence should be submitted and committed at the third height but @@ -248,20 +242,26 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { var wg sync.WaitGroup i := 0 + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, sub := range rts.subs { wg.Add(1) go func(j int, s eventbus.Subscription) { defer wg.Done() for { - if ctx.Err() != nil { + if subctx.Err() != nil { + return + } + + msg, err := s.Next(subctx) + if subctx.Err() != nil { return } - msg, err := s.Next(ctx) - assert.NoError(t, err) if err != nil { - cancel() + t.Errorf("waiting for subscription: %v", err) + subcancel() return } @@ -273,12 +273,18 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } } }(i, sub) - i++ } wg.Wait() + // don't run more assertions if we've encountered a timeout + select { + case <-subctx.Done(): + t.Fatal("encountered timeout") + default: + } + pubkey, err := bzNodeState.privValidator.GetPubKey(ctx) require.NoError(t, err) @@ -290,267 +296,3 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { assert.Equal(t, prevoteHeight, ev.Height()) } } - -// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). -// byzantine validator sends conflicting proposals into A and B, -// and prevotes/precommits on both of them. -// B sees a commit, A doesn't. 
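Note: the TestByzantinePrevoteEquivocation changes above capture the wiring pattern this patch applies throughout the tree: the event bus is started first and handed to the evidence pool, block executor, and consensus state at construction time (replacing the removed SetEventBus calls), and NewState now returns an error because it loads its initial state from the store. The following is a minimal illustrative sketch of that wiring in package consensus, not part of the patch; all inputs are taken as parameters, and error handling is kept short.

func wireConsensusState(
	ctx context.Context,
	logger log.Logger,
	cfg *config.ConsensusConfig,
	appClient abciclient.Client,
	evidenceDB dbm.DB,
	stateStore sm.Store,
	blockStore *store.BlockStore,
	mp mempool.Mempool,
	pv types.PrivValidator,
) (*State, error) {
	// The event bus is created and started before anything that consumes it.
	eventBus := eventbus.NewDefault(logger.With("module", "events"))
	if err := eventBus.Start(ctx); err != nil {
		return nil, err
	}

	// Constructors now take the bus directly; evidence.NewPool no longer
	// returns an error, and the SetEventBus setters are gone.
	evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
	blockExec := sm.NewBlockExecutor(stateStore, logger, appClient, mp, evpool, blockStore, eventBus)

	// NewState takes the state store rather than a state copy, and may fail
	// while loading the initial state, so it returns an error as well.
	cs, err := NewState(ctx, logger, cfg, stateStore, blockExec, blockStore, mp, evpool, eventBus)
	if err != nil {
		return nil, err
	}
	cs.SetPrivValidator(ctx, pv)
	return cs, nil
}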
-// Heal partition and ensure A sees the commit -func TestByzantineConflictingProposalsWithPartition(t *testing.T) { - // TODO: https://github.com/tendermint/tendermint/issues/6092 - t.SkipNow() - - // n := 4 - // logger := consensusLogger().With("test", "byzantine") - // app := newCounter - - // states, cleanup := randConsensusState(n, "consensus_byzantine_test", newMockTickerFunc(false), app) - // t.Cleanup(cleanup) - - // // give the byzantine validator a normal ticker - // ticker := NewTimeoutTicker() - // ticker.SetLogger(states[0].logger) - // states[0].SetTimeoutTicker(ticker) - - // p2pLogger := logger.With("module", "p2p") - - // blocksSubs := make([]types.Subscription, n) - // reactors := make([]p2p.Reactor, n) - // for i := 0; i < n; i++ { - // // enable txs so we can create different proposals - // assertMempool(states[i].txNotifier).EnableTxsAvailable() - - // eventBus := states[i].eventBus - // eventBus.SetLogger(logger.With("module", "events", "validator", i)) - - // var err error - // blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock) - // require.NoError(t, err) - - // conR := NewReactor(states[i], true) // so we don't start the consensus states - // conR.SetLogger(logger.With("validator", i)) - // conR.SetEventBus(eventBus) - - // var conRI p2p.Reactor = conR - - // // make first val byzantine - // if i == 0 { - // conRI = NewByzantineReactor(conR) - // } - - // reactors[i] = conRI - // err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info - // require.NoError(t, err) - // } - - // switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch { - // sw.SetLogger(p2pLogger.With("validator", i)) - // sw.AddReactor("CONSENSUS", reactors[i]) - // return sw - // }, func(sws []*p2p.Switch, i, j int) { - // // the network starts partitioned with globally active adversary - // if i != 0 { - // return - // } - // p2p.Connect2Switches(sws, i, j) - // }) - - // // make first val byzantine - // // NOTE: Now, test validators are MockPV, which by default doesn't - // // do any safety checks. - // states[0].privValidator.(types.MockPV).DisableChecks() - // states[0].decideProposal = func(j int32) func(int64, int32) { - // return func(height int64, round int32) { - // byzantineDecideProposalFunc(t, height, round, states[j], switches[j]) - // } - // }(int32(0)) - // // We are setting the prevote function to do nothing because the prevoting - // // and precommitting are done alongside the proposal. - // states[0].doPrevote = func(height int64, round int32) {} - - // defer func() { - // for _, sw := range switches { - // err := sw.Stop() - // require.NoError(t, err) - // } - // }() - - // // start the non-byz state machines. - // // note these must be started before the byz - // for i := 1; i < n; i++ { - // cr := reactors[i].(*Reactor) - // cr.SwitchToConsensus(cr.conS.GetState(), false) - // } - - // // start the byzantine state machine - // byzR := reactors[0].(*ByzantineReactor) - // s := byzR.reactor.conS.GetState() - // byzR.reactor.SwitchToConsensus(s, false) - - // // byz proposer sends one block to peers[0] - // // and the other block to peers[1] and peers[2]. - // // note peers and switches order don't match. 
- // peers := switches[0].Peers().List() - - // // partition A - // ind0 := getSwitchIndex(switches, peers[0]) - - // // partition B - // ind1 := getSwitchIndex(switches, peers[1]) - // ind2 := getSwitchIndex(switches, peers[2]) - // p2p.Connect2Switches(switches, ind1, ind2) - - // // wait for someone in the big partition (B) to make a block - // <-blocksSubs[ind2].Out() - - // t.Log("A block has been committed. Healing partition") - // p2p.Connect2Switches(switches, ind0, ind1) - // p2p.Connect2Switches(switches, ind0, ind2) - - // // wait till everyone makes the first new block - // // (one of them already has) - // wg := new(sync.WaitGroup) - // for i := 1; i < N-1; i++ { - // wg.Add(1) - // go func(j int) { - // <-blocksSubs[j].Out() - // wg.Done() - // }(i) - // } - - // done := make(chan struct{}) - // go func() { - // wg.Wait() - // close(done) - // }() - - // tick := time.NewTicker(time.Second * 10) - // select { - // case <-done: - // case <-tick.C: - // for i, reactor := range reactors { - // t.Log(fmt.Sprintf("Consensus Reactor %v", i)) - // t.Log(fmt.Sprintf("%v", reactor)) - // } - // t.Fatalf("Timed out waiting for all validators to commit first block") - // } -} - -// func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { -// // byzantine user should create two proposals and try to split the vote. -// // Avoid sending on internalMsgQueue and running consensus state. - -// // Create a new proposal block from state/txs from the mempool. -// block1, blockParts1 := cs.createProposalBlock() -// polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} -// proposal1 := types.NewProposal(height, round, polRound, propBlockID) -// p1 := proposal1.ToProto() -// if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { -// t.Error(err) -// } - -// proposal1.Signature = p1.Signature - -// // some new transactions come in (this ensures that the proposals are different) -// deliverTxsRange(cs, 0, 1) - -// // Create a new proposal block from state/txs from the mempool. -// block2, blockParts2 := cs.createProposalBlock() -// polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} -// proposal2 := types.NewProposal(height, round, polRound, propBlockID) -// p2 := proposal2.ToProto() -// if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { -// t.Error(err) -// } - -// proposal2.Signature = p2.Signature - -// block1Hash := block1.Hash() -// block2Hash := block2.Hash() - -// // broadcast conflicting proposals/block parts to peers -// peers := sw.Peers().List() -// t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) -// for i, peer := range peers { -// if i < len(peers)/2 { -// go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) -// } else { -// go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) -// } -// } -// } - -// func sendProposalAndParts( -// height int64, -// round int32, -// cs *State, -// peer p2p.Peer, -// proposal *types.Proposal, -// blockHash []byte, -// parts *types.PartSet, -// ) { -// // proposal -// msg := &ProposalMessage{Proposal: proposal} -// peer.Send(DataChannel, MustEncode(msg)) - -// // parts -// for i := 0; i < int(parts.Total()); i++ { -// part := parts.GetPart(i) -// msg := &BlockPartMessage{ -// Height: height, // This tells peer that this part applies to us. 
-// Round: round, // This tells peer that this part applies to us. -// Part: part, -// } -// peer.Send(DataChannel, MustEncode(msg)) -// } - -// // votes -// cs.mtx.Lock() -// prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) -// precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) -// cs.mtx.Unlock() - -// peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) -// peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) -// } - -// type ByzantineReactor struct { -// service.Service -// reactor *Reactor -// } - -// func NewByzantineReactor(conR *Reactor) *ByzantineReactor { -// return &ByzantineReactor{ -// Service: conR, -// reactor: conR, -// } -// } - -// func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } -// func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } - -// func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { -// if !br.reactor.IsRunning() { -// return -// } - -// // Create peerState for peer -// peerState := NewPeerState(peer).SetLogger(br.reactor.logger) -// peer.Set(types.PeerStateKey, peerState) - -// // Send our state to peer. -// // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). -// if !br.reactor.waitSync { -// br.reactor.sendNewRoundStepMessage(peer) -// } -// } - -// func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { -// br.reactor.RemovePeer(peer, reason) -// } - -// func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { -// br.reactor.Receive(chID, peer, msgBytes) -// } - -// func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 10955fc8c..b0f22e54f 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -69,6 +69,9 @@ func configSetup(t *testing.T) *config.Config { require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) }) + walDir := filepath.Dir(cfg.Consensus.WalFile()) + ensureDir(t, walDir, 0700) + return cfg } @@ -239,7 +242,9 @@ func decideProposal( t.Helper() cs1.mtx.Lock() - block, blockParts, err := cs1.createProposalBlock(ctx) + block, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) validRound := cs1.ValidRound chainID := cs1.state.ChainID @@ -370,7 +375,11 @@ func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte) vote := msg.Data().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { - ch <- msg + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- msg: + } } return nil }, types.EventQueryVote); err != nil { @@ -401,7 +410,10 @@ func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr vote := msg.Data().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { - ch <- msg + select { + case <-ctx.Done(): + case ch <- msg: + } } } }() @@ -462,7 +474,6 @@ func newStateWithConfigAndBlockStore( logger.With("module", "mempool"), thisConfig.Mempool, proxyAppConnMem, - 0, ) if thisConfig.Consensus.WaitForTxs() { @@ -476,22 +487,26 @@ func newStateWithConfigAndBlockStore( stateStore := sm.NewStore(stateDB) require.NoError(t, stateStore.Save(state)) - blockExec := sm.NewBlockExecutor(stateStore, logger, 
proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(ctx, + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus) + cs, err := NewState(ctx, logger.With("module", "consensus"), thisConfig.Consensus, - state, + stateStore, blockExec, blockStore, mempool, evpool, + eventBus, ) - cs.SetPrivValidator(ctx, pv) + if err != nil { + t.Fatal(err) + } - eventBus := eventbus.NewDefault(logger.With("module", "events")) - require.NoError(t, eventBus.Start(ctx)) + cs.SetPrivValidator(ctx, pv) - cs.SetEventBus(eventBus) return cs } @@ -775,6 +790,7 @@ func makeConsensusState( configOpts ...func(*config.Config), ) ([]*State, cleanupFunc) { t.Helper() + tempDir := t.TempDir() valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30) genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) @@ -789,7 +805,7 @@ func makeConsensusState( blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(tempDir, fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) configRootDirs = append(configRootDirs, thisConfig.RootDir) @@ -798,7 +814,8 @@ func makeConsensusState( opt(thisConfig) } - ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + walDir := filepath.Dir(thisConfig.Consensus.WalFile()) + ensureDir(t, walDir, 0700) app := kvstore.NewApplication() closeFuncs = append(closeFuncs, app.Close) diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index dfdb2ecf6..033b096ba 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -5,6 +5,7 @@ import ( "errors" "sync" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,7 +21,7 @@ import ( ) func TestReactorInvalidPrecommit(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() config := configSetup(t) @@ -49,14 +50,14 @@ func TestReactorInvalidPrecommit(t *testing.T) { byzState := rts.states[node.NodeID] byzReactor := rts.reactors[node.NodeID] - calledDoPrevote := false + signal := make(chan struct{}) // Update the doPrevote function to just send a valid precommit for a random // block and otherwise disable the priv validator. 
byzState.mtx.Lock() privVal := byzState.privValidator byzState.doPrevote = func(ctx context.Context, height int64, round int32) { + defer close(signal) invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal) - calledDoPrevote = true } byzState.mtx.Unlock() @@ -72,16 +73,30 @@ go func(s eventbus.Subscription) { defer wg.Done() _, err := s.Next(ctx) + if ctx.Err() != nil { + return + } if !assert.NoError(t, err) { cancel() // cancel other subscribers on failure } }(sub) } } + wait := make(chan struct{}) + go func() { defer close(wait); wg.Wait() }() - wg.Wait() - if !calledDoPrevote { - t.Fatal("test failed to run core logic") + select { + case <-wait: + if _, ok := <-signal; !ok { + t.Fatal("test condition did not fire") + } + case <-ctx.Done(): + if _, ok := <-signal; !ok { + t.Fatal("test condition did not fire after timeout") + return + } + case <-signal: + // test passed } } @@ -130,19 +145,27 @@ func invalidDoPrevoteFunc( cs.privValidator = nil // disable priv val so we don't do normal votes cs.mtx.Unlock() - count := 0 + r.mtx.Lock() + ids := make([]types.NodeID, 0, len(r.peers)) for _, ps := range r.peers { + ids = append(ids, ps.peerID) + } + r.mtx.Unlock() + + count := 0 + for _, peerID := range ids { count++ err := r.voteCh.Send(ctx, p2p.Envelope{ - To: ps.peerID, + To: peerID, Message: &tmcons.Vote{ Vote: precommit.ToProto(), }, }) // we want to have sent some of these votes, // but if the test completes without erroring - // and we get here, we shouldn't error - if errors.Is(err, context.Canceled) && count > 1 { + // and without sending any messages, it should + // fail. + if errors.Is(err, context.Canceled) && count > 0 { break } require.NoError(t, err) diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index f0bd18958..e527e8598 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -51,7 +51,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { ensureNewEventOnChannel(t, newBlockCh) // first block gets committed ensureNoNewEventOnChannel(t, newBlockCh) - deliverTxsRange(ctx, t, cs, 0, 1) + checkTxsRange(ctx, t, cs, 0, 1) ensureNewEventOnChannel(t, newBlockCh) // commit txs ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash ensureNoNewEventOnChannel(t, newBlockCh) @@ -118,7 +118,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { round = 0 ensureNewRound(t, newRoundCh, height, round) // first round at next height - deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + checkTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) round++ // moving to the next round @@ -126,7 +126,7 @@ ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block } -func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) { +func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) { t.Helper() // Deliver some txs.
for i := start; i < end; i++ { @@ -159,7 +159,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 - go deliverTxsRange(ctx, t, cs, 0, int(numTxs)) + go checkTxsRange(ctx, t, cs, 0, int(numTxs)) startTestRound(ctx, cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { @@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(0)) - resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) - assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver)) + resFinalize := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) + assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. got %v", resFinalize)) resCommit := app.Commit() assert.True(t, len(resCommit.Data) > 0) @@ -212,7 +212,7 @@ func TestMempoolRmBadTx(t *testing.T) { checkTxRespCh <- struct{}{} }, mempool.TxInfo{}) if err != nil { - t.Errorf("error after CheckTx: %w", err) + t.Errorf("error after CheckTx: %v", err) return } @@ -265,20 +265,20 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { } func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { - respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs)) + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { txValue := txAsUint64(tx) if txValue != uint64(app.txCount) { - respTxs[i] = &abci.ResponseDeliverTx{ + respTxs[i] = &abci.ExecTxResult{ Code: code.CodeTypeBadNonce, Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue), } continue } app.txCount++ - respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK} + respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK} } - return abci.ResponseFinalizeBlock{Txs: respTxs} + return abci.ResponseFinalizeBlock{TxResults: respTxs} } func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { @@ -310,7 +310,7 @@ func (app *CounterApplication) Commit() abci.ResponseCommit { func (app *CounterApplication) PrepareProposal( req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { - return abci.ResponsePrepareProposal{BlockData: req.BlockData} + return abci.ResponsePrepareProposal{} } func (app *CounterApplication) ProcessProposal( diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go index f99ec7e39..1cb6892c4 100644 --- a/internal/consensus/pbts_test.go +++ b/internal/consensus/pbts_test.go @@ -203,7 +203,7 @@ func (p *pbtsTestHarness) nextHeight(ctx context.Context, t *testing.T, proposer ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound) - b, _, err := p.observedState.createProposalBlock(ctx) + b, err := p.observedState.createProposalBlock(ctx) require.NoError(t, err) b.Height = p.currentHeight b.Header.Height = p.currentHeight diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index eb038d9f5..efb3f2d04 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -138,6 +138,7 @@ func NewReactor( cs *State, channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, + eventBus *eventbus.EventBus, waitSync bool, metrics *Metrics, ) (*Reactor, error) { @@ -166,6 +167,7 @@ func NewReactor( state: cs, waitSync: waitSync, peers: make(map[types.NodeID]*PeerState), + eventBus: eventBus, Metrics: metrics, stateCh: 
stateCh, dataCh: dataCh, @@ -226,12 +228,6 @@ func (r *Reactor) OnStop() { } } -// SetEventBus sets the reactor's event bus. -func (r *Reactor) SetEventBus(b *eventbus.EventBus) { - r.eventBus = b - r.state.SetEventBus(b) -} - // WaitSync returns whether the consensus reactor is waiting for state/block sync. func (r *Reactor) WaitSync() bool { r.mtx.RLock() diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index f01d013b3..ef816d85f 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "os" - "path" "sync" "testing" "time" @@ -110,13 +109,12 @@ func setup( state, chCreator(nodeID), node.MakePeerUpdates(ctx, t), + state.eventBus, true, NopMetrics(), ) require.NoError(t, err) - reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: testSubscriber, Query: types.EventQueryNewBlock, @@ -461,12 +459,12 @@ func TestReactorWithEvidence(t *testing.T) { stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) + require.NoError(t, stateStore.Save(state)) thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal app := kvstore.NewApplication() vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) @@ -483,7 +481,6 @@ func TestReactorWithEvidence(t *testing.T) { log.TestingLogger().With("module", "mempool"), thisConfig.Mempool, proxyAppConnMem, - 0, ) if thisConfig.Consensus.WaitForTxs() { @@ -504,15 +501,15 @@ func TestReactorWithEvidence(t *testing.T) { evpool2 := sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - - cs := NewState(ctx, logger.With("validator", i, "module", "consensus"), - thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) - cs.SetPrivValidator(ctx, pv) - eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) require.NoError(t, eventBus.Start(ctx)) - cs.SetEventBus(eventBus) + + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus) + + cs, err := NewState(ctx, logger.With("validator", i, "module", "consensus"), + thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus) + require.NoError(t, err) + cs.SetPrivValidator(ctx, pv) cs.SetTimeoutTicker(tickerFunc()) @@ -565,7 +562,6 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { c.Consensus.CreateEmptyBlocks = false }, ) - t.Cleanup(cleanup) rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 6250ffc06..5d097df21 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -10,6 +10,7 @@ import ( "reflect" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/eventbus" @@ -204,7 +205,7 @@ type Handshaker struct { stateStore sm.Store initialState sm.State store sm.BlockStore - eventBus types.BlockEventPublisher + eventBus *eventbus.EventBus genDoc *types.GenesisDoc logger log.Logger @@ 
-216,7 +217,7 @@ func NewHandshaker( stateStore sm.Store, state sm.State, store sm.BlockStore, - eventBus types.BlockEventPublisher, + eventBus *eventbus.EventBus, genDoc *types.GenesisDoc, ) *Handshaker { @@ -237,10 +238,10 @@ func (h *Handshaker) NBlocks() int { } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(ctx context.Context, appClient abciclient.Client) error { // Handshake is done via ABCI Info on the query conn. - res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo) + res, err := appClient.Info(ctx, proxy.RequestInfo) if err != nil { return fmt.Errorf("error calling Info: %w", err) } @@ -264,7 +265,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, appClient) if err != nil { return fmt.Errorf("error on replay: %w", err) } @@ -285,7 +286,7 @@ func (h *Handshaker) ReplayBlocks( state sm.State, appHash []byte, appBlockHeight int64, - proxyApp proxy.AppConns, + appClient abciclient.Client, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -316,7 +317,7 @@ func (h *Handshaker) ReplayBlocks( Validators: nextVals, AppStateBytes: h.genDoc.AppState, } - res, err := proxyApp.Consensus().InitChain(ctx, req) + res, err := appClient.InitChain(ctx, req) if err != nil { return nil, err } @@ -390,7 +391,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! 
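Note: with proxy.AppConns removed, the handshake path now drives a single abciclient.Client for Info, InitChain, and block replay. A rough sketch of the updated call sequence, assembled from the constructor calls that appear in this patch's tests; the parameters stand in for whatever the caller already holds, and this is an illustration rather than patch content.

func runHandshake(
	ctx context.Context,
	logger log.Logger,
	app abci.Application,
	stateStore sm.Store,
	state sm.State,
	blockStore sm.BlockStore,
	eventBus *eventbus.EventBus,
	genDoc *types.GenesisDoc,
) error {
	// NewLocalClient and proxy.New replace NewLocalCreator and proxy.NewAppConns.
	client := abciclient.NewLocalClient(logger, app)
	proxyApp := proxy.New(client, logger, proxy.NopMetrics())
	if err := proxyApp.Start(ctx); err != nil {
		return err
	}

	// NewHandshaker now takes a concrete *eventbus.EventBus rather than a
	// types.BlockEventPublisher, and Handshake is called with the ABCI
	// client itself instead of an AppConns bundle.
	h := NewHandshaker(logger, stateStore, state, blockStore, eventBus, genDoc)
	return h.Handshake(ctx, proxyApp)
}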
@@ -405,7 +406,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -413,7 +414,7 @@ func (h *Handshaker) ReplayBlocks( // NOTE: We could instead use the cs.WAL on cs.Start, // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT h.logger.Info("Replay last block using real app") - state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient) return state.AppHash, err case appBlockHeight == storeBlockHeight: @@ -426,6 +427,9 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } + if err := mockApp.Start(ctx); err != nil { + return nil, err + } h.logger.Info("Replay last block using mock app") state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp) @@ -445,7 +449,7 @@ func (h *Handshaker) ReplayBlocks( func (h *Handshaker) replayBlocks( ctx context.Context, state sm.State, - proxyApp proxy.AppConns, + appClient abciclient.Client, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { @@ -480,17 +484,15 @@ func (h *Handshaker) replayBlocks( if i == finalBlock && !mutateState { // We emit events for the index services at the final block due to the sync issue when // the node shutdown during the block committing status. - blockExec := sm.NewBlockExecutor( - h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store) - blockExec.SetEventBus(h.eventBus) + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus) appHash, err = sm.ExecCommitBlock(ctx, - blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) + blockExec, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) if err != nil { return nil, err } } else { appHash, err = sm.ExecCommitBlock(ctx, - nil, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) + nil, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) if err != nil { return nil, err } @@ -501,7 +503,7 @@ func (h *Handshaker) replayBlocks( if mutateState { // sync the final block - state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient) if err != nil { return nil, err } @@ -517,15 +519,14 @@ func (h *Handshaker) replayBlock( ctx context.Context, state sm.State, height int64, - proxyApp proxy.AppConnConsensus, + appClient abciclient.Client, ) (sm.State, error) { block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) // Use stubs for both mempool and evidence pool since no transactions nor // evidence are needed here - block already exists. 
- blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store) - blockExec.SetEventBus(h.eventBus) + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus) var err error state, err = blockExec.ApplyBlock(ctx, state, meta.BlockID, block) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 310eb0ab6..492d1d1ee 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -84,7 +84,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro return err } - pb := newPlayback(file, fp, cs, cs.state.Copy()) + pb := newPlayback(file, fp, cs, cs.stateStore) defer pb.fp.Close() var nextN int // apply N msgs in a row @@ -126,17 +126,17 @@ type playback struct { count int // how many lines/msgs into the file are we // replays can be reset to beginning - fileName string // so we can close/reopen the file - genesisState sm.State // so the replay session knows where to restart from + fileName string // so we can close/reopen the file + stateStore sm.Store } -func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback { +func newPlayback(fileName string, fp *os.File, cs *State, store sm.Store) *playback { return &playback{ - cs: cs, - fp: fp, - fileName: fileName, - genesisState: genState, - dec: NewWALDecoder(fp), + cs: cs, + fp: fp, + fileName: fileName, + stateStore: store, + dec: NewWALDecoder(fp), } } @@ -145,9 +145,11 @@ func (pb *playback) replayReset(ctx context.Context, count int, newStepSub event pb.cs.Stop() pb.cs.Wait() - newCS := NewState(ctx, pb.cs.logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, - pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool) - newCS.SetEventBus(pb.cs.eventBus) + newCS, err := NewState(ctx, pb.cs.logger, pb.cs.config, pb.stateStore, pb.cs.blockExec, + pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, pb.cs.eventBus) + if err != nil { + return err + } newCS.startForReplay() if err := pb.fp.Close(); err != nil { @@ -323,9 +325,12 @@ func newConsensusStateForReplay( return nil, err } - // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + if err != nil { + return nil, err + } + + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) err = proxyApp.Start(ctx) if err != nil { return nil, fmt.Errorf("starting proxy app conns: %w", err) @@ -343,11 +348,12 @@ func newConsensusStateForReplay( } mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore) - - consensusState := NewState(ctx, logger, csConfig, state.Copy(), blockExec, - blockStore, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus) - consensusState.SetEventBus(eventBus) + consensusState, err := NewState(ctx, logger, csConfig, stateStore, blockExec, + blockStore, mempool, evpool, eventBus) + if err != nil { + return nil, err + } return consensusState, nil } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 08eed5d69..e479d344e 100644 --- a/internal/consensus/replay_stubs.go +++ 
b/internal/consensus/replay_stubs.go @@ -32,7 +32,7 @@ func (emptyMempool) Update( _ context.Context, _ int64, _ types.Txs, - _ []*abci.ResponseDeliverTx, + _ []*abci.ExecTxResult, _ mempool.PreCheckFunc, _ mempool.PostCheckFunc, ) error { @@ -61,22 +61,11 @@ func newMockProxyApp( logger log.Logger, appHash []byte, abciResponses *tmstate.ABCIResponses, -) (proxy.AppConnConsensus, error) { - - clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ +) (abciclient.Client, error) { + return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{ appHash: appHash, abciResponses: abciResponses, - }) - cli, err := clientCreator(logger) - if err != nil { - return nil, err - } - - if err = cli.Start(ctx); err != nil { - return nil, err - } - - return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()), nil + }), logger, proxy.NopMetrics()), nil } type mockProxyApp struct { diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index b9302d125..c735e9977 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -35,7 +35,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -381,7 +380,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{}) assert.NoError(t, err) - propBlock, _, err := css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlock, err := css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) require.NoError(t, err) propBlockParts, err := propBlock.MakePartSet(partSize) require.NoError(t, err) @@ -415,7 +414,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{}) assert.NoError(t, err) - propBlock, _, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) require.NoError(t, err) propBlockParts, err = propBlock.MakePartSet(partSize) require.NoError(t, err) @@ -456,7 +455,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{}) assert.NoError(t, err) - propBlock, _, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) require.NoError(t, err) propBlockParts, err = propBlock.MakePartSet(partSize) require.NoError(t, err) @@ -544,7 +543,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{}) assert.NoError(t, err) - propBlock, _, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) 
require.NoError(t, err) propBlockParts, err = propBlock.MakePartSet(partSize) require.NoError(t, err) @@ -652,61 +651,6 @@ func TestHandshakeReplayNone(t *testing.T) { } } -// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx -func TestMockProxyApp(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sim := setupSimulator(ctx, t) // setup config and simulator - cfg := sim.Config - assert.NotNil(t, cfg) - - logger := log.TestingLogger() - var validTxs, invalidTxs = 0, 0 - txCount := 0 - - assert.NotPanics(t, func() { - abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses) - abciResWithEmptyDeliverTx.FinalizeBlock = new(abci.ResponseFinalizeBlock) - abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0) - abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{}) - - // called when saveABCIResponses: - bytes, err := proto.Marshal(abciResWithEmptyDeliverTx) - require.NoError(t, err) - loadedAbciRes := new(tmstate.ABCIResponses) - - // this also happens sm.LoadABCIResponses - err = proto.Unmarshal(bytes, loadedAbciRes) - require.NoError(t, err) - - mock, err := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes) - require.NoError(t, err) - - abciRes := new(tmstate.ABCIResponses) - abciRes.FinalizeBlock = new(abci.ResponseFinalizeBlock) - abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs)) - - someTx := []byte("tx") - resp, err := mock.FinalizeBlock(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{someTx}}) - require.NoError(t, err) - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - for _, tx := range resp.Txs { - if tx.Code == abci.CodeTypeOK { - validTxs++ - } else { - invalidTxs++ - } - txCount++ - } - }) - require.Equal(t, 1, txCount) - require.Equal(t, 1, validTxs) - require.Zero(t, invalidTxs) -} - func tempWALWithData(t *testing.T, data []byte) string { t.Helper() @@ -804,16 +748,19 @@ func testHandshakeReplay( filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) - clientCreator2 := abciclient.NewLocalCreator(kvstoreApp) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + clientCreator2 := abciclient.NewLocalClient(logger, kvstoreApp) if nBlocks > 0 { // run nBlocks against a new client to build up the app state. 
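Note: the TestMockProxyApp deletion above removes the last consumer of the old per-transaction result type in this file; across the patch, abci.ResponseDeliverTx becomes abci.ExecTxResult, and ResponseFinalizeBlock returns the results in TxResults rather than Txs. A sketch of a FinalizeBlock implementation under the renamed types, mirroring the CounterApplication changes earlier in the patch; MyApp is a hypothetical application used only for illustration.

// MyApp is hypothetical; only the field and type names reflect this patch.
func (app *MyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	// Per-transaction results are now abci.ExecTxResult values.
	results := make([]*abci.ExecTxResult, len(req.Txs))
	for i := range req.Txs {
		results[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
	}
	// The response carries them in TxResults; the old Txs field is gone.
	return abci.ResponseFinalizeBlock{TxResults: results}
}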
// use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) + proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) - buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) + buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, eventBus, nBlocks, mode, store) } // Prune block store if requested @@ -828,10 +775,11 @@ func testHandshakeReplay( // now start the app using the handshake - it should sync genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) + handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc) + proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") - + require.True(t, proxyApp.IsRunning()) + require.NotNil(t, proxyApp) t.Cleanup(func() { cancel(); proxyApp.Wait() }) err = handshaker.Handshake(ctx, proxyApp) @@ -842,7 +790,7 @@ func testHandshakeReplay( require.NoError(t, err, "Error on abci handshake") // get the latest app hash from the app - res, err := proxyApp.Query().Info(ctx, abci.RequestInfo{Version: ""}) + res, err := proxyApp.Info(ctx, abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -875,11 +823,12 @@ func applyBlock( evpool sm.EvidencePool, st sm.State, blk *types.Block, - proxyApp proxy.AppConns, + appClient abciclient.Client, blockStore *mockBlockStore, + eventBus *eventbus.EventBus, ) sm.State { testPartSize := types.BlockPartSizeBytes - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mempool, evpool, blockStore, eventBus) bps, err := blk.MakePartSet(testPartSize) require.NoError(t, err) @@ -892,23 +841,24 @@ func applyBlock( func buildAppStateFromChain( ctx context.Context, t *testing.T, - proxyApp proxy.AppConns, + appClient abciclient.Client, stateStore sm.Store, mempool mempool.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, + eventBus *eventbus.EventBus, nBlocks int, mode uint, blockStore *mockBlockStore, ) { t.Helper() // start a new app without handshake, play nBlocks blocks - require.NoError(t, proxyApp.Start(ctx)) + require.NoError(t, appClient.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - _, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{ + _, err := appClient.InitChain(ctx, abci.RequestInitChain{ Validators: validators, }) require.NoError(t, err) @@ -919,18 +869,18 @@ func buildAppStateFromChain( case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus) } case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] - state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = 
applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus) } if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not - state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], appClient, blockStore, eventBus) } default: require.Fail(t, "unknown mode %v", mode) @@ -958,37 +908,40 @@ func buildTMStateFromChain( kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() - clientCreator := abciclient.NewLocalCreator(kvstoreApp) + client := abciclient.NewLocalClient(logger, kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - _, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{ + _, err := proxyApp.InitChain(ctx, abci.RequestInitChain{ Validators: validators, }) require.NoError(t, err) require.NoError(t, stateStore.Save(state)) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + switch mode { case 0: // sync right up for _, block := range chain { - state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus) } case 1, 2, 3: // sync up to the penultimate as if we stored the block. // whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore) + applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore, eventBus) default: require.Fail(t, "unknown mode %v", mode) } @@ -1025,20 +978,23 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { logger := log.TestingLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + // 2. 
Tendermint must panic if app returns wrong hash for the first block // - RANDOM HASH // - 0x02 // - 0x03 { app := &badApp{numBlocks: 3, allHashesAreWrong: true} - clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + client := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc) if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } @@ -1051,14 +1007,14 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - RANDOM HASH { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} - clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + client := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc) if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } @@ -1282,12 +1238,16 @@ func TestHandshakeUpdatesValidators(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.NewNopLogger() votePower := 10 + int64(rand.Uint32()) val, _, err := factory.Validator(ctx, votePower) require.NoError(t, err) vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} - clientCreator := abciclient.NewLocalCreator(app) + client := abciclient.NewLocalClient(logger, app) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) cfg, err := ResetConfig(t.TempDir(), "handshake_test_") require.NoError(t, err) @@ -1306,9 +1266,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) { genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - logger := log.TestingLogger() - handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) - proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake") diff --git a/internal/consensus/state.go b/internal/consensus/state.go index e54fa3859..4f0f2c680 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -20,6 +20,7 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/jsontypes" + "github.com/tendermint/tendermint/internal/libs/autofile" sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" @@ -121,6 +122,9 @@ type State struct { // store blocks and commits blockStore sm.BlockStore + stateStore sm.Store + 
initialStatePopulated bool + // create and execute blocks blockExec *sm.BlockExecutor @@ -189,18 +193,21 @@ func NewState( ctx context.Context, logger log.Logger, cfg *config.ConsensusConfig, - state sm.State, + store sm.Store, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, txNotifier txNotifier, evpool evidencePool, + eventBus *eventbus.EventBus, options ...StateOption, -) *State { +) (*State, error) { cs := &State{ + eventBus: eventBus, logger: logger, config: cfg, blockExec: blockExec, blockStore: blockStore, + stateStore: store, txNotifier: txNotifier, peerMsgQueue: make(chan msgInfo, msgQueueSize), internalMsgQueue: make(chan msgInfo, msgQueueSize), @@ -220,27 +227,40 @@ func NewState( cs.doPrevote = cs.defaultDoPrevote cs.setProposal = cs.defaultSetProposal - // We have no votes, so reconstruct LastCommit from SeenCommit. - if state.LastBlockHeight > 0 { - cs.reconstructLastCommit(state) + if err := cs.updateStateFromStore(ctx); err != nil { + return nil, err } - cs.updateToState(ctx, state) - // NOTE: we do not call scheduleRound0 yet, we do that upon Start() - cs.BaseService = *service.NewBaseService(logger, "State", cs) for _, option := range options { option(cs) } - return cs + return cs, nil } -// SetEventBus sets event bus. -func (cs *State) SetEventBus(b *eventbus.EventBus) { - cs.eventBus = b - cs.blockExec.SetEventBus(b) +func (cs *State) updateStateFromStore(ctx context.Context) error { + if cs.initialStatePopulated { + return nil + } + state, err := cs.stateStore.Load() + if err != nil { + return fmt.Errorf("loading state: %w", err) + } + if state.IsEmpty() { + return nil + } + + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + cs.reconstructLastCommit(state) + } + + cs.updateToState(ctx, state) + + cs.initialStatePopulated = true + return nil } // StateMetrics sets the metrics. @@ -365,6 +385,10 @@ func (cs *State) LoadCommit(height int64) *types.Commit { // OnStart loads the latest state via the WAL, and starts the timeout and // receive routines. func (cs *State) OnStart(ctx context.Context) error { + if err := cs.updateStateFromStore(ctx); err != nil { + return err + } + // We may set the WAL in testing before calling Start, so only OpenWAL if its // still the nilWAL. if _, ok := cs.wal.(nilWAL); ok { @@ -846,15 +870,27 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { defer func() { if r := recover(); r != nil { cs.logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) - // stop gracefully - // - // NOTE: We most probably shouldn't be running any further when there is - // some unexpected panic. Some unknown error happened, and so we don't - // know if that will result in the validator signing an invalid thing. It - // might be worthwhile to explore a mechanism for manual resuming via - // some console or secure RPC system, but for now, halting the chain upon - // unexpected consensus bugs sounds like the better option. + + // Make a best-effort attempt to close the WAL, but otherwise do not + // attempt to gracefully terminate. Once consensus has irrecoverably + // failed, any additional progress we permit the node to make may + // complicate diagnosing and recovering from the failure. onExit(cs) + + // Re-panic to ensure the node terminates. + // + // TODO(creachadair): In ordinary operation, the WAL autofile should + // never be closed. This only happens during shutdown and production + // nodes usually halt by panicking. 
Many existing tests, however, + // assume a clean shutdown is possible. Prior to #8111, we were + // swallowing the panic in receiveRoutine, making that appear to + // work. Filtering this specific error is slightly risky, but should + // affect only unit tests. In any case, not re-panicking here only + // preserves the pre-existing behavior for this one error type. + if err, ok := r.(error); ok && errors.Is(err, autofile.ErrAutoFileClosed) { + return + } + panic(r) } }() @@ -867,14 +903,11 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { } } - rs := cs.RoundState - var mi msgInfo - select { case <-cs.txNotifier.TxsAvailable(): cs.handleTxsAvailable(ctx) - case mi = <-cs.peerMsgQueue: + case mi := <-cs.peerMsgQueue: if err := cs.wal.Write(mi); err != nil { cs.logger.Error("failed writing to WAL", "err", err) } @@ -883,11 +916,11 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { // may generate internal events (votes, complete proposals, 2/3 majorities) cs.handleMsg(ctx, mi) - case mi = <-cs.internalMsgQueue: + case mi := <-cs.internalMsgQueue: err := cs.wal.WriteSync(mi) // NOTE: fsync if err != nil { - panic(fmt.Sprintf( - "failed to write %v msg to consensus WAL due to %v; check your file system and restart the node", + panic(fmt.Errorf( + "failed to write %v msg to consensus WAL due to %w; check your file system and restart the node", mi, err, )) } @@ -902,7 +935,7 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { // if the timeout is relevant to the rs // go to the next step - cs.handleTimeout(ctx, ti, rs) + cs.handleTimeout(ctx, ti, cs.RoundState) case <-ctx.Done(): onExit(cs) @@ -964,13 +997,11 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo) { } } - // if err == ErrAddingVote { // TODO: punish peer // We probably don't want to stop the peer here. The vote does not // necessarily comes from a malicious peer but can be just broadcasted by // a typical peer. // https://github.com/tendermint/tendermint/issues/1281 - // } // NOTE: the vote is broadcast to peers by the reactor listening // for vote events @@ -1269,8 +1300,16 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round } else { // Create a new proposal block from state/txs from the mempool. var err error - block, blockParts, err = cs.createProposalBlock(ctx) - if block == nil || err != nil { + block, err = cs.createProposalBlock(ctx) + if err != nil { + cs.logger.Error("unable to create proposal block", "error", err) + return + } else if block == nil { + return + } + blockParts, err = block.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + cs.logger.Error("unable to create proposal block part set", "error", err) return } } @@ -1329,13 +1368,12 @@ func (cs *State) isProposalComplete() bool { // // NOTE: keep it side-effect free for clarity. // CONTRACT: cs.privValidator is not nil. -func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, blockParts *types.PartSet, err error) { +func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) { if cs.privValidator == nil { - return nil, nil, errors.New("entered createProposalBlock with privValidator being nil") + return nil, errors.New("entered createProposalBlock with privValidator being nil") } var commit *types.Commit - var votes []*types.Vote switch { case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. 
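Taken together, the State changes above replace post-construction wiring (the old SetEventBus call) with constructor injection: the event bus and state store are handed to NewState, which now returns an error instead of assuming a loadable state. A minimal wiring sketch from outside the package, assuming stateStore, proxyApp, mempool, evpool, and blockStore are already built (the same pattern appears in wal_generator.go further down in this diff):

    eventBus := eventbus.NewDefault(logger)
    if err := eventBus.Start(ctx); err != nil {
        return err
    }
    // Both the block executor and the consensus state now take the event bus directly.
    blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus)
    cs, err := consensus.NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
    if err != nil {
        return err // e.g. the state store could not be loaded
    }

If the store is still empty at construction time, NewState succeeds anyway and OnStart retries updateStateFromStore before opening the WAL.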
@@ -1345,23 +1383,22 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() - votes = cs.LastCommit.GetVotes() default: // This shouldn't happen. cs.logger.Error("propose step; cannot propose anything without commit for the previous block") - return + return nil, nil } if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. cs.logger.Error("propose step; empty priv validator public key", "err", errPubKeyIsNotSet) - return + return nil, nil } proposerAddr := cs.privValidatorPubKey.Address() - return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, votes) + return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, cs.LastCommit.GetVotes()) } // Enter: `timeoutPropose` after entering Propose. @@ -1880,8 +1917,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { // restart). endMsg := EndHeightMessage{height} if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync - panic(fmt.Sprintf( - "failed to write %v msg to consensus WAL due to %v; check your file system and restart the node", + panic(fmt.Errorf( + "failed to write %v msg to consensus WAL due to %w; check your file system and restart the node", endMsg, err, )) } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index f1cfc225c..f008e75d3 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -220,7 +220,7 @@ func TestStateBadProposal(t *testing.T) { proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _, err := cs1.createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlock, err := cs1.createProposalBlock(ctx) // changeProposer(t, cs1, vs2) require.NoError(t, err) // make the second validator the proposer by incrementing round @@ -282,7 +282,7 @@ func TestStateOversizedBlock(t *testing.T) { timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _, err := cs1.createProposalBlock(ctx) + propBlock, err := cs1.createProposalBlock(ctx) require.NoError(t, err) propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} propBlock.Header.DataHash = propBlock.Data.Hash() @@ -1965,6 +1965,79 @@ func TestProcessProposalAccept(t *testing.T) { } } +func TestFinalizeBlockCalled(t *testing.T) { + for _, testCase := range []struct { + name string + voteNil bool + expectCalled bool + }{ + { + name: "finalize block called when block committed", + voteNil: false, + expectCalled: true, + }, + { + name: "not called when block not committed", + voteNil: true, + expectCalled: false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := abcimocks.NewBaseMock() + m.On("ProcessProposal", mock.Anything).Return(abcitypes.ResponseProcessProposal{Accept: true}) + m.On("VerifyVoteExtension", mock.Anything).Return(abcitypes.ResponseVerifyVoteExtension{ + Result: abcitypes.ResponseVerifyVoteExtension_ACCEPT, + }) + m.On("FinalizeBlock", mock.Anything).Return(abcitypes.ResponseFinalizeBlock{}).Maybe() + cs1, vss := makeState(ctx, t, makeStateArgs{config: config,
application: m}) + height, round := cs1.Height, cs1.Round + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + + blockID := types.BlockID{} + nextRound := round + 1 + nextHeight := height + if !testCase.voteNil { + nextRound = 0 + nextHeight = height + 1 + blockID = types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + } + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) + ensurePrecommit(t, voteCh, height, round) + + ensureNewRound(t, newRoundCh, nextHeight, nextRound) + m.AssertExpectations(t) + + if !testCase.expectCalled { + m.AssertNotCalled(t, "FinalizeBlock", mock.Anything) + } else { + m.AssertCalled(t, "FinalizeBlock", mock.Anything) + } + }) + } +} + // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round @@ -2559,7 +2632,7 @@ func TestStateTimestamp_ProposalNotMatch(t *testing.T) { addr := pv1.Address() voteCh := subscribeToVoter(ctx, t, cs1, addr) - propBlock, _, err := cs1.createProposalBlock(ctx) + propBlock, err := cs1.createProposalBlock(ctx) require.NoError(t, err) round++ incrementRound(vss[1:]...) @@ -2607,7 +2680,7 @@ func TestStateTimestamp_ProposalMatch(t *testing.T) { addr := pv1.Address() voteCh := subscribeToVoter(ctx, t, cs1, addr) - propBlock, _, err := cs1.createProposalBlock(ctx) + propBlock, err := cs1.createProposalBlock(ctx) require.NoError(t, err) round++ incrementRound(vss[1:]...) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 57f6d6704..e99c054c0 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -30,8 +30,10 @@ import ( // stripped down version of node (proxy app, event bus, consensus state) with a // persistent kvstore application and special consensus wal instance // (byteBufferWAL) and waits until numBlocks are created. -// If the node fails to produce given numBlocks, it returns an error. -func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) (err error) { +// If the node fails to produce given numBlocks, it fails the test. 
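With the signature change below, callers drop their error plumbing entirely; a minimal sketch of the resulting call pattern (mirroring WALWithNBlocks at the end of this file):

    var b bytes.Buffer
    wr := bufio.NewWriter(&b)
    WALGenerateNBlocks(ctx, t, log.NewNopLogger(), wr, numBlocks) // any failure aborts the test via t.Fatal
    wr.Flush() // b now holds the encoded WAL messages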
+func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) { + t.Helper() + cfg := getConfig(t) app := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), "wal_generator")) @@ -46,41 +48,46 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { - return err + t.Fatal(err) } genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) if err != nil { - return fmt.Errorf("failed to read genesis file: %w", err) + t.Fatal(fmt.Errorf("failed to read genesis file: %w", err)) } blockStoreDB := dbm.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) if err != nil { - return fmt.Errorf("failed to make genesis state: %w", err) + t.Fatal(fmt.Errorf("failed to make genesis state: %w", err)) } state.Version.Consensus.App = kvstore.ProtocolVersion if err = stateStore.Save(state); err != nil { - t.Error(err) + t.Fatal(err) } blockStore := store.NewBlockStore(blockStoreDB) - - proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics()) + proxyLogger := logger.With("module", "proxy") + proxyApp := proxy.New(abciclient.NewLocalClient(logger, app), proxyLogger, proxy.NopMetrics()) if err := proxyApp.Start(ctx); err != nil { - return fmt.Errorf("failed to start proxy app connections: %w", err) + t.Fatal(fmt.Errorf("failed to start proxy app connections: %w", err)) } + t.Cleanup(proxyApp.Wait) eventBus := eventbus.NewDefault(logger.With("module", "events")) if err := eventBus.Start(ctx); err != nil { - return fmt.Errorf("failed to start event bus: %w", err) + t.Fatal(fmt.Errorf("failed to start event bus: %w", err)) } + t.Cleanup(func() { eventBus.Stop(); eventBus.Wait() }) mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(ctx, logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) - consensusState.SetEventBus(eventBus) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp, mempool, evpool, blockStore, eventBus) + consensusState, err := NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus) + if err != nil { + t.Fatal(err) + } + if privValidator != nil && privValidator != (*privval.FilePV)(nil) { consensusState.SetPrivValidator(ctx, privValidator) } @@ -91,22 +98,24 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 if err := wal.Write(EndHeightMessage{0}); err != nil { - t.Error(err) + t.Fatal(err) } consensusState.wal = wal if err := consensusState.Start(ctx); err != nil { - return fmt.Errorf("failed to start consensus state: %w", err) + t.Fatal(fmt.Errorf("failed to start consensus state: %w", err)) } + t.Cleanup(consensusState.Wait) + + defer consensusState.Stop() + timer := time.NewTimer(time.Minute) + defer timer.Stop() select { case <-numBlocksWritten: - consensusState.Stop() - return nil - case <-time.After(1 * time.Minute): - consensusState.Stop() - return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", 
numBlocks) + case <-timer.C: + t.Fatal(fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)) } } @@ -115,9 +124,7 @@ func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlo var b bytes.Buffer wr := bufio.NewWriter(&b) - if err := WALGenerateNBlocks(ctx, t, logger, wr, numBlocks); err != nil { - return []byte{}, err - } + WALGenerateNBlocks(ctx, t, logger, wr, numBlocks) wr.Flush() return b.Bytes(), nil diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index a2c76676c..169b7c327 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "os" "path/filepath" "testing" @@ -41,13 +42,12 @@ func TestWALTruncate(t *testing.T) { require.NoError(t, err) err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(wal.Wait) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. at this // time, RotateFile is called, truncate content exist in each file. - err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60) - require.NoError(t, err) + WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60) // put the leakcheck here so it runs after other cleanup // functions. @@ -112,7 +112,7 @@ func TestWALWrite(t *testing.T) { require.NoError(t, err) err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(wal.Wait) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -151,7 +151,6 @@ func TestWALSearchForEndHeight(t *testing.T) { wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - t.Cleanup(func() { wal.Stop(); wal.Wait() }) h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) @@ -176,24 +175,24 @@ func TestWALPeriodicSync(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + defer os.RemoveAll(walFile) + wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond)) require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) logger := log.NewNopLogger() // Generate some data - err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5) - require.NoError(t, err) + WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5) // We should have data in the buffer now assert.NotZero(t, wal.Group().Buffered()) require.NoError(t, wal.Start(ctx)) - t.Cleanup(func() { wal.Stop(); wal.Wait() }) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) - time.Sleep(walTestFlushInterval + (10 * time.Millisecond)) + time.Sleep(walTestFlushInterval + (20 * time.Millisecond)) // The data should have been flushed by the periodic sync assert.Zero(t, wal.Group().Buffered()) diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go index 2a7c032b3..5a64c5f1c 100644 --- a/internal/eventbus/event_bus.go +++ b/internal/eventbus/event_bus.go @@ -50,13 +50,6 @@ func (b *EventBus) NumClientSubscriptions(clientID string) int { return b.pubsub.NumClientSubscriptions(clientID) } -// Deprecated: Use SubscribeWithArgs instead. 
-func (b *EventBus) Subscribe(ctx context.Context, - clientID string, query *tmquery.Query, capacities ...int) (Subscription, error) { - - return b.pubsub.Subscribe(ctx, clientID, query, capacities...) -} - func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) { return b.pubsub.SubscribeWithArgs(ctx, args) } @@ -201,28 +194,3 @@ func (b *EventBus) PublishEventValidatorSetUpdates(ctx context.Context, data typ func (b *EventBus) PublishEventEvidenceValidated(ctx context.Context, evidence types.EventDataEvidenceValidated) error { return b.Publish(ctx, types.EventEvidenceValidatedValue, evidence) } - -//----------------------------------------------------------------------------- - -// NopEventBus implements a types.BlockEventPublisher that discards all events. -type NopEventBus struct{} - -func (NopEventBus) PublishEventNewBlock(context.Context, types.EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(context.Context, types.EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventNewEvidence(context.Context, types.EventDataNewEvidence) error { - return nil -} - -func (NopEventBus) PublishEventTx(context.Context, types.EventDataTx) error { - return nil -} - -func (NopEventBus) PublishEventValidatorSetUpdates(context.Context, types.EventDataValidatorSetUpdates) error { - return nil -} diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index bc816aaca..3ef96b80b 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -27,7 +27,7 @@ func TestEventBusPublishEventTx(t *testing.T) { require.NoError(t, err) tx := types.Tx("foo") - result := abci.ResponseDeliverTx{ + result := abci.ExecTxResult{ Data: []byte("bar"), Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, @@ -134,7 +134,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { require.NoError(t, err) tx := types.Tx("foo") - result := abci.ResponseDeliverTx{ + result := abci.ExecTxResult{ Data: []byte("bar"), Events: []abci.Event{ { diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index f4afb1f8c..d2d998c91 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -36,14 +36,14 @@ type Pool struct { evidenceList *clist.CList // concurrent linked-list of evidence evidenceSize uint32 // amount of pending evidence - // needed to load validators to verify evidence - stateDB sm.Store // needed to load headers and commits to verify evidence blockStore BlockStore + stateDB sm.Store mtx sync.Mutex // latest state - state sm.State + state sm.State + isStarted bool // evidence from consensus is buffered to this slice, awaiting until the next height // before being flushed to the pool. This prevents broadcasting and proposing of // evidence before the height with which the evidence happened is finished. @@ -60,46 +60,19 @@ type Pool struct { Metrics *Metrics } -func (evpool *Pool) SetEventBus(e *eventbus.EventBus) { - evpool.eventBus = e -} - // NewPool creates an evidence pool. If using an existing evidence store, // it will add all pending evidence to the concurrent list. 
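The replacement below splits that behavior in two: NewPool only wires dependencies (now including the event bus) and no longer returns an error, while the expiry pruning and reloading of pending evidence move to an explicit Start step. A minimal caller sketch, mirroring the startPool test helper added later in this diff:

    pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
    state, err := stateStore.Load()
    if err != nil {
        return fmt.Errorf("cannot load state: %w", err)
    }
    // Start prunes expired pending evidence and reloads the rest into the clist.
    if err := pool.Start(state); err != nil {
        return err
    }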
-func NewPool(logger log.Logger, evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore, metrics *Metrics) (*Pool, error) { - state, err := stateDB.Load() - if err != nil { - return nil, fmt.Errorf("failed to load state: %w", err) - } - - pool := &Pool{ - stateDB: stateDB, +func NewPool(logger log.Logger, evidenceDB dbm.DB, stateStore sm.Store, blockStore BlockStore, metrics *Metrics, eventBus *eventbus.EventBus) *Pool { + return &Pool{ blockStore: blockStore, - state: state, + stateDB: stateStore, logger: logger, evidenceStore: evidenceDB, evidenceList: clist.New(), consensusBuffer: make([]duplicateVoteSet, 0), Metrics: metrics, + eventBus: eventBus, } - - // If pending evidence already in db, in event of prior failure, then check - // for expiration, update the size and load it back to the evidenceList. - pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence() - evList, _, err := pool.listEvidence(prefixPending, -1) - if err != nil { - return nil, err - } - - atomic.StoreUint32(&pool.evidenceSize, uint32(len(evList))) - pool.Metrics.NumEvidence.Set(float64(pool.evidenceSize)) - - for _, ev := range evList { - pool.evidenceList.PushBack(ev) - } - pool.eventBus = nil - - return pool, nil } // PendingEvidence is used primarily as part of block proposal and returns up to @@ -277,6 +250,31 @@ func (evpool *Pool) State() sm.State { return evpool.state } +func (evpool *Pool) Start(state sm.State) error { + if evpool.isStarted { + return errors.New("pool is already running") + } + + evpool.state = state + + // If pending evidence already in db, in event of prior failure, then check + // for expiration, update the size and load it back to the evidenceList. + evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence() + evList, _, err := evpool.listEvidence(prefixPending, -1) + if err != nil { + return err + } + + atomic.StoreUint32(&evpool.evidenceSize, uint32(len(evList))) + evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize)) + + for _, ev := range evList { + evpool.evidenceList.PushBack(ev) + } + + return nil +} + func (evpool *Pool) Close() error { return evpool.evidenceStore.Close() } @@ -449,6 +447,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide } func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { + batch := evpool.evidenceStore.NewBatch() defer batch.Close() @@ -473,7 +472,6 @@ func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { // remove evidence from the clist evpool.removeEvidenceFromList(blockEvidenceMap) - // update the evidence size atomic.AddUint32(&evpool.evidenceSize, ^uint32(len(blockEvidenceMap)-1)) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 51f785221..dcf44a5df 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -34,6 +34,18 @@ var ( defaultEvidenceMaxBytes int64 = 1000 ) +func startPool(t *testing.T, pool *evidence.Pool, store sm.Store) { + t.Helper() + state, err := store.Load() + if err != nil { + t.Fatalf("cannot load state: %v", err) + } + if err := pool.Start(state); err != nil { + t.Fatalf("cannot start state pool: %v", err) + } + +} + func TestEvidencePoolBasic(t *testing.T) { var ( height = int64(1) @@ -51,9 +63,13 @@ func TestEvidencePoolBasic(t *testing.T) { stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) stateStore.On("Load").Return(createState(height+1, valSet), nil) - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, 
stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) - require.NoError(t, setupEventBus(ctx, pool)) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) + // evidence not seen yet: evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, 0, len(evs)) @@ -115,10 +131,12 @@ func TestAddExpiredEvidence(t *testing.T) { return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}} }) - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) testCases := []struct { evHeight int64 @@ -159,9 +177,7 @@ func TestReportConflictingVotes(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pool, pv := defaultTestPool(ctx, t, height) - - require.NoError(t, setupEventBus(ctx, pool)) + pool, pv, _ := defaultTestPool(ctx, t, height) val := types.NewValidator(pv.PrivKey.PubKey(), 10) @@ -201,9 +217,7 @@ func TestEvidencePoolUpdate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pool, val := defaultTestPool(ctx, t, height) - - require.NoError(t, setupEventBus(ctx, pool)) + pool, val, _ := defaultTestPool(ctx, t, height) state := pool.State() @@ -273,9 +287,7 @@ func TestVerifyPendingEvidencePasses(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pool, val := defaultTestPool(ctx, t, height) - - require.NoError(t, setupEventBus(ctx, pool)) + pool, val, _ := defaultTestPool(ctx, t, height) ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( ctx, @@ -295,9 +307,7 @@ func TestVerifyDuplicatedEvidenceFails(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pool, val := defaultTestPool(ctx, t, height) - - require.NoError(t, setupEventBus(ctx, pool)) + pool, val, _ := defaultTestPool(ctx, t, height) ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( ctx, @@ -321,7 +331,7 @@ func TestEventOnEvidenceValidated(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pool, val := defaultTestPool(ctx, t, height) + pool, val, eventBus := defaultTestPool(ctx, t, height) ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( ctx, @@ -332,11 +342,6 @@ func TestEventOnEvidenceValidated(t *testing.T) { ) require.NoError(t, err) - eventBus := eventbus.NewDefault(log.TestingLogger()) - require.NoError(t, eventBus.Start(ctx)) - - pool.SetEventBus(eventBus) - const query = `tm.event='EvidenceValidated'` evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: "test", @@ -348,6 +353,9 @@ func TestEventOnEvidenceValidated(t *testing.T) { go func() { defer close(done) msg, err := evSub.Next(ctx) + if ctx.Err() != nil { + return + } assert.NoError(t, err) edt := msg.Data().(types.EventDataEvidenceValidated) @@ -394,14 +402,15 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) blockStore.On("LoadBlockCommit", 
commonHeight).Return(common.Commit) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) hash := ev.Hash() - err = pool.AddEvidence(ctx, ev) + err := pool.AddEvidence(ctx, ev) require.NoError(t, err) err = pool.AddEvidence(ctx, ev) require.NoError(t, err) @@ -449,11 +458,13 @@ func TestRecoverPendingEvidence(t *testing.T) { blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress) require.NoError(t, err) - // create previous pool and populate it - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + // create previous pool and populate it + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) goodEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator( ctx, @@ -495,9 +506,8 @@ func TestRecoverPendingEvidence(t *testing.T) { }, }, nil) - newPool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, newStateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) - + newPool := evidence.NewPool(logger, evidenceDB, newStateStore, blockStore, evidence.NopMetrics(), nil) + startPool(t, newPool, newStateStore) evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, 1, len(evList)) @@ -559,10 +569,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo for i := int64(1); i <= state.LastBlockHeight; i++ { lastCommit := makeCommit(i-1, valAddr) - block, err := sf.MakeBlock(state, i, lastCommit) - if err != nil { - return nil, err - } + block := sf.MakeBlock(state, i, lastCommit) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1} @@ -590,7 +597,7 @@ func makeCommit(height int64, valAddr []byte) *types.Commit { return types.NewCommit(height, 0, types.BlockID{}, commitSigs) } -func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV) { +func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV, *eventbus.EventBus) { t.Helper() val := types.NewMockPV() valAddress := val.PrivKey.PubKey().Address() @@ -601,10 +608,14 @@ func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress) require.NoError(t, err) - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err, "test evidence pool could not be created") + logger := log.NewNopLogger() - return pool, val + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) + return pool, val, eventBus } func createState(height int64, valSet *types.ValidatorSet) sm.State { @@ -616,12 
+627,3 @@ func createState(height int64, valSet *types.ValidatorSet) sm.State { ConsensusParams: *types.DefaultConsensusParams(), } } - -func setupEventBus(ctx context.Context, evpool *evidence.Pool) error { - eventBus := eventbus.NewDefault(log.TestingLogger()) - if err := eventBus.Start(ctx); err != nil { - return err - } - evpool.SetEventBus(eventBus) - return nil -} diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index d0863acc1..664fb7b4e 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -82,13 +82,14 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint } return nil }) - rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics()) - - require.NoError(t, err) eventBus := eventbus.NewDefault(logger) err = eventBus.Start(ctx) require.NoError(t, err) - rts.pools[nodeID].SetEventBus(eventBus) + + rts.pools[nodeID] = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics(), eventBus) + startPool(t, rts.pools[nodeID], stateStores[idx]) + + require.NoError(t, err) rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 607c8fd50..675c5795a 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" sm "github.com/tendermint/tendermint/internal/state" @@ -76,6 +77,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.NewNopLogger() attackTime := defaultEvidenceTime.Add(1 * time.Hour) // create valid lunatic evidence @@ -96,8 +98,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header}) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + pool := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil) evList := types.EvidenceList{ev} // check that the evidence pool correctly verifies the evidence @@ -111,32 +112,29 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { // if we submit evidence only against a single byzantine validator when we see there are more validators then this // should return an error ev.ByzantineValidators = ev.ByzantineValidators[:1] - t.Log(evList) assert.Error(t, pool.CheckEvidence(ctx, evList)) // restore original byz vals ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader) // duplicate evidence should be rejected evList = types.EvidenceList{ev, ev} - pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil) assert.Error(t, 
pool.CheckEvidence(ctx, evList)) // If evidence is submitted with an altered timestamp it should return an error - ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) - pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) + pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) - err = pool.AddEvidence(ctx, ev) + err := pool.AddEvidence(ctx, ev) assert.Error(t, err) ev.Timestamp = defaultEvidenceTime // Evidence submitted with a different validator power should fail ev.TotalVotingPower = 1 - pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil) err = pool.AddEvidence(ctx, ev) assert.Error(t, err) ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower() @@ -154,6 +152,9 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + + logger := log.NewNopLogger() + // create a forward lunatic attack ev, trusted, common := makeLunaticEvidence(ctx, t, attackHeight, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) @@ -179,10 +180,11 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit) blockStore.On("Height").Return(nodeHeight) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) - require.NoError(t, setupEventBus(ctx, pool)) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) // check that the evidence pool correctly verifies the evidence assert.NoError(t, pool.CheckEvidence(ctx, types.EvidenceList{ev})) @@ -199,8 +201,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { oldBlockStore.On("Height").Return(nodeHeight) require.Equal(t, defaultEvidenceTime, oldBlockStore.LoadBlockMeta(nodeHeight).Header.Time) - pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics()) - require.NoError(t, err) + pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics(), nil) assert.Error(t, pool.CheckEvidence(ctx, types.EvidenceList{ev})) } @@ -208,6 +209,8 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.NewNopLogger() + conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10) conflictingHeader := factory.MakeHeader(t, &types.Header{ @@ -289,10 +292,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + eventBus 
:= eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) evList := types.EvidenceList{ev} err = pool.CheckEvidence(ctx, evList) @@ -305,6 +308,9 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { func TestVerifyLightClientAttack_Amnesia(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + + logger := log.NewNopLogger() + var height int64 = 10 conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10) @@ -378,10 +384,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) evList := types.EvidenceList{ev} err = pool.CheckEvidence(ctx, evList) @@ -401,6 +407,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.NewNopLogger() val := types.NewMockPV() val2 := types.NewMockPV() valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(ctx, 1)}) @@ -478,10 +485,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - require.NoError(t, setupEventBus(ctx, pool)) + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) evList := types.EvidenceList{goodEv} err = pool.CheckEvidence(ctx, evList) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 810706607..36bbda802 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -265,7 +265,7 @@ func TestBlockResults(t *testing.T) { // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - Txs: []*abcitypes.ResponseDeliverTx{ + TxResults: []*abcitypes.ExecTxResult{ { GasUsed: testGasUsed, }, diff --git a/internal/libs/autofile/autofile.go b/internal/libs/autofile/autofile.go index 6f38fc43b..f554228ba 100644 --- a/internal/libs/autofile/autofile.go +++ b/internal/libs/autofile/autofile.go @@ -19,7 +19,7 @@ import ( // Create/Append to ./autofile_test af, err := OpenAutoFile("autofile_test") if err != nil { - panic(err) + log.Fatal(err) } // Stream of writes. 
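The hunks below also export ErrAutoFileClosed (previously errAutoFileClosed), which is what lets the consensus receiveRoutine above filter this one error during shutdown. A sketch of the intended caller-side check, with af and data as illustrative placeholders:

    if _, err := af.Write(data); err != nil {
        if errors.Is(err, autofile.ErrAutoFileClosed) {
            return // the file was closed during shutdown; nothing more to do
        }
        panic(err) // any other write failure remains fatal
    }

errors.Is matches here because Write wraps the sentinel via fmt.Errorf("write: %w", ErrAutoFileClosed).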
@@ -32,7 +32,7 @@ for i := 0; i < 60; i++ { // Close the AutoFile err = af.Close() if err != nil { - panic(err) + log.Fatal(err) } */ @@ -41,9 +41,9 @@ const ( autoFilePerms = os.FileMode(0600) ) -// errAutoFileClosed is reported when operations attempt to use an autofile +// ErrAutoFileClosed is reported when operations attempt to use an autofile // after it has been closed. -var errAutoFileClosed = errors.New("autofile is closed") +var ErrAutoFileClosed = errors.New("autofile is closed") // AutoFile automatically closes and re-opens file for writing. The file is // automatically setup to close itself every 1s and upon receiving SIGHUP. @@ -155,7 +155,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { af.mtx.Lock() defer af.mtx.Unlock() if af.closed { - return 0, fmt.Errorf("write: %w", errAutoFileClosed) + return 0, fmt.Errorf("write: %w", ErrAutoFileClosed) } if af.file == nil { @@ -174,7 +174,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { func (af *AutoFile) Sync() error { return af.withLock(func() error { if af.closed { - return fmt.Errorf("sync: %w", errAutoFileClosed) + return fmt.Errorf("sync: %w", ErrAutoFileClosed) } else if af.file == nil { return nil // nothing to sync } @@ -189,13 +189,7 @@ func (af *AutoFile) openFile() error { if err != nil { return err } - // fileInfo, err := file.Stat() - // if err != nil { - // return err - // } - // if fileInfo.Mode() != autoFilePerms { - // return errors.NewErrPermissionsChanged(file.Name(), fileInfo.Mode(), autoFilePerms) - // } + af.file = file return nil } @@ -207,7 +201,7 @@ func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() if af.closed { - return 0, fmt.Errorf("size: %w", errAutoFileClosed) + return 0, fmt.Errorf("size: %w", ErrAutoFileClosed) } if af.file == nil { diff --git a/internal/libs/autofile/cmd/logjack.go b/internal/libs/autofile/cmd/logjack.go index a9f6cf766..c3c466503 100644 --- a/internal/libs/autofile/cmd/logjack.go +++ b/internal/libs/autofile/cmd/logjack.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "io" + stdlog "log" "os" "os/signal" "strconv" @@ -19,19 +20,26 @@ const Version = "0.0.1" const readBufferSize = 1024 // 1KB at a time // Parse command-line options -func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { +func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool, err error) { var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) var chopSizeStr, limitSizeStr string flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). 
Remove old files.") flagSet.BoolVar(&version, "version", false, "Version") - if err := flagSet.Parse(os.Args[1:]); err != nil { - fmt.Printf("err parsing flag: %v\n", err) - os.Exit(1) + + if err = flagSet.Parse(os.Args[1:]); err != nil { + return + } + + chopSize, err = parseByteSize(chopSizeStr) + if err != nil { + return + } + limitSize, err = parseByteSize(limitSizeStr) + if err != nil { + return } - chopSize = parseBytesize(chopSizeStr) - limitSize = parseBytesize(limitSizeStr) return } @@ -41,22 +49,23 @@ func main() { defer func() { fmt.Println("logjack shutting down") }() // Read options - headPath, chopSize, limitSize, version := parseFlags() + headPath, chopSize, limitSize, version, err := parseFlags() + if err != nil { + stdlog.Fatalf("problem parsing arguments: %q", err.Error()) + } + if version { - fmt.Printf("logjack version %v\n", Version) - return + stdlog.Printf("logjack version %s", Version) } // Open Group group, err := auto.OpenGroup(ctx, log.NewNopLogger(), headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) if err != nil { - fmt.Printf("logjack couldn't create output file %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack couldn't create output file %q", headPath) } if err = group.Start(ctx); err != nil { - fmt.Printf("logjack couldn't start with file %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack couldn't start with file %q", headPath) } // Forever read from stdin and write to AutoFile. @@ -65,25 +74,21 @@ func main() { n, err := os.Stdin.Read(buf) if err != nil { if err == io.EOF { - os.Exit(0) - } else { - fmt.Println("logjack errored:", err.Error()) - os.Exit(1) + return } + stdlog.Fatalln("logjack errored:", err.Error()) } _, err = group.Write(buf[:n]) if err != nil { - fmt.Fprintf(os.Stderr, "logjack failed write with error %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack failed write %q with error: %q", headPath, err.Error()) } if err := group.FlushAndSync(); err != nil { - fmt.Fprintf(os.Stderr, "logjack flushsync fail with error %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack flushsync %q fail with error: %q", headPath, err.Error()) } } } -func parseBytesize(chopSize string) int64 { +func parseByteSize(chopSize string) (int64, error) { // Handle suffix multiplier var multiplier int64 = 1 if strings.HasSuffix(chopSize, "T") { @@ -106,8 +111,8 @@ func parseBytesize(chopSize string) int64 { // Parse the numeric part chopSizeInt, err := strconv.Atoi(chopSize) if err != nil { - panic(err) + return 0, err } - return int64(chopSizeInt) * multiplier + return int64(chopSizeInt) * multiplier, nil } diff --git a/internal/libs/autofile/group.go b/internal/libs/autofile/group.go index 1b4418d59..0d1806f22 100644 --- a/internal/libs/autofile/group.go +++ b/internal/libs/autofile/group.go @@ -274,6 +274,10 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) { g.mtx.Lock() defer g.mtx.Unlock() + if err := ctx.Err(); err != nil { + return + } + if g.totalSizeLimit == 0 { return } @@ -290,6 +294,11 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) { g.logger.Error("Group's head may grow without bound", "head", g.Head.Path) return } + + if ctx.Err() != nil { + return + } + pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) fInfo, err := os.Stat(pathToRemove) if err != nil { @@ -309,11 +318,16 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) { } } -// rotateFile causes group to close the current head and assign it some index. 
+// rotateFile causes group to close the current head and assign it +// some index. Panics if it encounters an error. func (g *Group) rotateFile(ctx context.Context) { g.mtx.Lock() defer g.mtx.Unlock() + if err := ctx.Err(); err != nil { + return + } + headPath := g.Head.Path if err := g.headBuf.Flush(); err != nil { diff --git a/internal/libs/flowrate/flowrate.go b/internal/libs/flowrate/flowrate.go index c2234669b..aaa54a22c 100644 --- a/internal/libs/flowrate/flowrate.go +++ b/internal/libs/flowrate/flowrate.go @@ -275,3 +275,15 @@ func (m *Monitor) waitNextSample(now time.Duration) time.Duration { } return now } + +// CurrentTransferRate returns the current transfer rate +func (m *Monitor) CurrentTransferRate() int64 { + m.mu.Lock() + defer m.mu.Unlock() + + if m.sLast > m.start && m.active { + return round(m.rEMA) + } + + return 0 +} diff --git a/internal/libs/queue/queue_test.go b/internal/libs/queue/queue_test.go index 204c18653..08ecc3955 100644 --- a/internal/libs/queue/queue_test.go +++ b/internal/libs/queue/queue_test.go @@ -167,7 +167,7 @@ func TestWait(t *testing.T) { defer close(done) got, err := q.Wait(ctx) if err != nil { - t.Errorf("Wait: unexpected error: %w", err) + t.Errorf("Wait: unexpected error: %v", err) } else if got != input { t.Errorf("Wait: got %q, want %q", got, input) } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 21429721d..6fcfe86c1 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -9,10 +9,10 @@ import ( "sync/atomic" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" - "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" @@ -31,7 +31,7 @@ type TxMempool struct { logger log.Logger metrics *Metrics config *config.MempoolConfig - proxyAppConn proxy.AppConnMempool + proxyAppConn abciclient.Client // txsAvailable fires once for each height when the mempool is not empty txsAvailable chan struct{} @@ -93,8 +93,7 @@ type TxMempool struct { func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, + proxyAppConn abciclient.Client, options ...TxMempoolOption, ) *TxMempool { @@ -102,7 +101,7 @@ func NewTxMempool( logger: logger, config: cfg, proxyAppConn: proxyAppConn, - height: height, + height: -1, cache: NopTxCache{}, metrics: NopMetrics(), txStore: NewTxStore(), @@ -418,11 +417,10 @@ func (txmp *TxMempool) Update( ctx context.Context, blockHeight int64, blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, + execTxResult []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, ) error { - txmp.height = blockHeight txmp.notifiedTxsAvailable = false @@ -434,7 +432,7 @@ func (txmp *TxMempool) Update( } for i, tx := range blockTxs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { + if execTxResult[i].Code == abci.CodeTypeOK { // add the valid committed transaction to the cache (if missing) _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index e2cf12e07..e4f604cb1 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -78,24 +78,24 @@ func setup(ctx context.Context, t testing.TB, cacheSize 
int, options ...TxMempoo var cancel context.CancelFunc ctx, cancel = context.WithCancel(ctx) - app := &application{kvstore.NewApplication()} - cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() + conn := abciclient.NewLocalClient(logger, &application{ + kvstore.NewApplication(), + }) + cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) cfg.Mempool.CacheSize = cacheSize - appConnMem, err := cc(logger) - require.NoError(t, err) - require.NoError(t, appConnMem.Start(ctx)) + require.NoError(t, conn.Start(ctx)) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) cancel() - appConnMem.Wait() + conn.Wait() }) - return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) + return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, conn, options...) } func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { @@ -172,9 +172,9 @@ func TestTxMempool_TxsAvailable(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } // commit half the transactions and ensure we fire an event @@ -204,9 +204,9 @@ func TestTxMempool_Size(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() @@ -231,9 +231,9 @@ func TestTxMempool_Flush(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() @@ -446,7 +446,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { for range ticker.C { reapedTxs := txmp.ReapMaxTxs(200) if len(reapedTxs) > 0 { - responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses := make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { var code uint32 @@ -456,7 +456,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { code = abci.CodeTypeOK } - responses[i] = &abci.ResponseDeliverTx{Code: code} + responses[i] = &abci.ExecTxResult{Code: code} } txmp.Lock() @@ -494,9 +494,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { // reap 5 txs at the next height -- no txs should expire reapedTxs := txmp.ReapMaxTxs(5) - responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses := make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() @@ -520,9 +520,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { // removed. However, we do know that that at most 95 txs can be expired and // removed. 
reapedTxs = txmp.ReapMaxTxs(5) - responses = make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses = make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go deleted file mode 100644 index e8782c914..000000000 --- a/internal/mempool/mock/mempool.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/libs/clist" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -// Mempool is an empty implementation of a Mempool, useful for testing. -type Mempool struct{} - -var _ Mempool = Mempool{} - -func (Mempool) Lock() {} -func (Mempool) Unlock() {} -func (Mempool) Size() int { return 0 } -func (Mempool) CheckTx(context.Context, types.Tx, func(*abci.ResponseCheckTx), mempool.TxInfo) error { - return nil -} -func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } -func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } -func (Mempool) Update( - _ context.Context, - _ int64, - _ types.Txs, - _ []*abci.ResponseDeliverTx, - _ mempool.PreCheckFunc, - _ mempool.PostCheckFunc, -) error { - return nil -} -func (Mempool) Flush() {} -func (Mempool) FlushAppConn(ctx context.Context) error { return nil } -func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (Mempool) EnableTxsAvailable() {} -func (Mempool) SizeBytes() int64 { return 0 } - -func (Mempool) TxsFront() *clist.CElement { return nil } -func (Mempool) TxsWaitChan() <-chan struct{} { return nil } - -func (Mempool) InitWAL() error { return nil } -func (Mempool) CloseWAL() {} diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go new file mode 100644 index 000000000..d4cdfabd7 --- /dev/null +++ b/internal/mempool/mocks/mempool.go @@ -0,0 +1,172 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + abcitypes "github.com/tendermint/tendermint/abci/types" + + mempool "github.com/tendermint/tendermint/internal/mempool" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/types" +) + +// Mempool is an autogenerated mock type for the Mempool type +type Mempool struct { + mock.Mock +} + +// CheckTx provides a mock function with given fields: ctx, tx, callback, txInfo +func (_m *Mempool) CheckTx(ctx context.Context, tx types.Tx, callback func(*abcitypes.ResponseCheckTx), txInfo mempool.TxInfo) error { + ret := _m.Called(ctx, tx, callback, txInfo) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Tx, func(*abcitypes.ResponseCheckTx), mempool.TxInfo) error); ok { + r0 = rf(ctx, tx, callback, txInfo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnableTxsAvailable provides a mock function with given fields: +func (_m *Mempool) EnableTxsAvailable() { + _m.Called() +} + +// Flush provides a mock function with given fields: +func (_m *Mempool) Flush() { + _m.Called() +} + +// FlushAppConn provides a mock function with given fields: _a0 +func (_m *Mempool) FlushAppConn(_a0 context.Context) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Lock provides a mock function with given fields: +func (_m *Mempool) Lock() { + _m.Called() +} + +// ReapMaxBytesMaxGas provides a mock function with given fields: maxBytes, maxGas +func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { + ret := _m.Called(maxBytes, maxGas) + + var r0 types.Txs + if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { + r0 = rf(maxBytes, maxGas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Txs) + } + } + + return r0 +} + +// ReapMaxTxs provides a mock function with given fields: max +func (_m *Mempool) ReapMaxTxs(max int) types.Txs { + ret := _m.Called(max) + + var r0 types.Txs + if rf, ok := ret.Get(0).(func(int) types.Txs); ok { + r0 = rf(max) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Txs) + } + } + + return r0 +} + +// RemoveTxByKey provides a mock function with given fields: txKey +func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { + ret := _m.Called(txKey) + + var r0 error + if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { + r0 = rf(txKey) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Size provides a mock function with given fields: +func (_m *Mempool) Size() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// SizeBytes provides a mock function with given fields: +func (_m *Mempool) SizeBytes() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// TxsAvailable provides a mock function with given fields: +func (_m *Mempool) TxsAvailable() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Unlock provides a mock function with given fields: +func (_m *Mempool) Unlock() { + _m.Called() +} + +// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, 
newPreFn, newPostFn +func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { + ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { + r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index c073a7356..04e51ca8d 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -242,9 +242,9 @@ func TestReactorConcurrency(t *testing.T) { mempool.Lock() defer mempool.Unlock() - deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) + deliverTxResponses := make([]*abci.ExecTxResult, len(txs)) for i := range txs { - deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} + deliverTxResponses[i] = &abci.ExecTxResult{Code: 0} } require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) @@ -261,7 +261,7 @@ func TestReactorConcurrency(t *testing.T) { mempool.Lock() defer mempool.Unlock() - err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil) require.NoError(t, err) }() } diff --git a/internal/mempool/types.go b/internal/mempool/types.go index d78517372..a51d286e2 100644 --- a/internal/mempool/types.go +++ b/internal/mempool/types.go @@ -23,6 +23,8 @@ const ( MaxActiveIDs = math.MaxUint16 ) +//go:generate ../../scripts/mockery_generate.sh Mempool + // Mempool defines the mempool interface. // // Updates to the mempool need to be synchronized with committing a block so @@ -66,7 +68,7 @@ type Mempool interface { ctx context.Context, blockHeight int64, blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, + txResults []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, ) error diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 693a7ce58..4cbca7f19 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -413,7 +413,7 @@ func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. - c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true) // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { @@ -481,7 +481,7 @@ FOR_LOOP: } // Block until .recvMonitor says we can read. 
- c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) + c.recvMonitor.Limit(c._maxPacketMsgSize, c.config.RecvRate, true) // Peek into bufConnReader for debugging /* diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 362c8102f..6752d9d21 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -126,7 +126,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { nodePrvKey := ed25519.GenPrivKey() nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) if err != nil { - t.Errorf("failed to establish SecretConnection for node: %w", err) + t.Errorf("failed to establish SecretConnection for node: %v", err) return nil, true, err } // In parallel, handle some reads and writes. @@ -136,7 +136,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { for _, nodeWrite := range nodeWrites { n, err := nodeSecretConn.Write([]byte(nodeWrite)) if err != nil { - t.Errorf("failed to write to nodeSecretConn: %w", err) + t.Errorf("failed to write to nodeSecretConn: %v", err) return nil, true, err } if n != len(nodeWrite) { @@ -163,7 +163,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { } return nil, false, nil } else if err != nil { - t.Errorf("failed to read from nodeSecretConn: %w", err) + t.Errorf("failed to read from nodeSecretConn: %v", err) return nil, true, err } *nodeReads = append(*nodeReads, string(readBuffer[:n])) @@ -288,7 +288,7 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i for i := 0; i < n; i++ { _, err := conn.Write([]byte(txt)) if err != nil { - t.Errorf("failed to write to fooSecConn: %w", err) + t.Errorf("failed to write to fooSecConn: %v", err) return } } @@ -343,7 +343,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection func(_ int) (val interface{}, abort bool, err error) { fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) if err != nil { - tb.Errorf("failed to establish SecretConnection for foo: %w", err) + tb.Errorf("failed to establish SecretConnection for foo: %v", err) return nil, true, err } remotePubBytes := fooSecConn.RemotePubKey() @@ -358,7 +358,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection func(_ int) (val interface{}, abort bool, err error) { barSecConn, err = MakeSecretConnection(barConn, barPrvKey) if barSecConn == nil { - tb.Errorf("failed to establish SecretConnection for bar: %w", err) + tb.Errorf("failed to establish SecretConnection for bar: %v", err) return nil, true, err } remotePubBytes := barSecConn.RemotePubKey() @@ -405,7 +405,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) { if err == io.EOF { return } else if err != nil { - b.Errorf("failed to read from barSecConn: %w", err) + b.Errorf("failed to read from barSecConn: %v", err) return } } @@ -416,7 +416,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) { idx := mrand.Intn(len(fooWriteBytes)) _, err := fooSecConn.Write(fooWriteBytes[idx]) if err != nil { - b.Errorf("failed to write to fooSecConn: %w", err) + b.Errorf("failed to write to fooSecConn: %v", err) return } } diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 0c256a4f3..2beaeaa17 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -3,14 +3,12 @@ package pex import ( "context" "fmt" - "runtime/debug" "sync" "time" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" 
"github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" @@ -42,7 +40,7 @@ const ( minReceiveRequestInterval = 100 * time.Millisecond // the maximum amount of addresses that can be included in a response - maxAddresses uint16 = 100 + maxAddresses = 100 // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -100,15 +98,8 @@ type Reactor struct { // minReceiveRequestInterval). lastReceivedRequests map[types.NodeID]time.Time - // keep track of how many new peers to existing peers we have received to - // extrapolate the size of the network - newPeers uint32 - totalPeers uint32 - - // discoveryRatio is the inverse ratio of new peers to old peers squared. - // This is multiplied by the minimum duration to calculate how long to wait - // between each request. - discoveryRatio float32 + // the total number of unique peers added + totalPeers int } // NewReactor returns a reference to a new reactor. @@ -156,16 +147,6 @@ func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. func (r *Reactor) processPexCh(ctx context.Context) { - timer := time.NewTimer(0) - defer timer.Stop() - - r.mtx.Lock() - var ( - duration = r.calculateNextRequestTime() - err error - ) - r.mtx.Unlock() - incoming := make(chan *p2p.Envelope) go func() { defer close(incoming) @@ -179,36 +160,51 @@ func (r *Reactor) processPexCh(ctx context.Context) { } }() + // Initially, we will request peers quickly to bootstrap. This duration + // will be adjusted upward as knowledge of the network grows. + var nextPeerRequest = minReceiveRequestInterval + + timer := time.NewTimer(0) + defer timer.Stop() + for { - timer.Reset(duration) + timer.Reset(nextPeerRequest) select { case <-ctx.Done(): return - // outbound requests for new peers case <-timer.C: - duration, err = r.sendRequestForPeers(ctx) - if err != nil { + // Send a request for more peer addresses. + if err := r.sendRequestForPeers(ctx); err != nil { return + // TODO(creachadair): Do we really want to stop processing the PEX + // channel just because of an error here? } - // inbound requests for new peers or responses to requests sent by this - // reactor + + // Note we do not update the poll timer upon making a request, only + // when we receive an update that updates our priors. + case envelope, ok := <-incoming: if !ok { - return + return // channel closed } - duration, err = r.handleMessage(ctx, r.pexCh.ID, envelope) + + // A request from another peer, or a response to one of our requests. + dur, err := r.handlePexMessage(ctx, envelope) if err != nil { - r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) + r.logger.Error("failed to process message", + "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) if serr := r.pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, }); serr != nil { return } + } else if dur != 0 { + // We got a useful result; update the poll timer. + nextPeerRequest = dur } - } } } @@ -228,19 +224,20 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { } // handlePexMessage handles envelopes sent from peers on the PexChannel. +// If an update was received, a new polling interval is returned; otherwise the +// duration is 0. 
func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) (time.Duration, error) { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *protop2p.PexRequest: - // check if the peer hasn't sent a prior request too close to this one - // in time + // Verify that this peer hasn't sent us another request too recently. if err := r.markPeerRequest(envelope.From); err != nil { - return time.Minute, err + return 0, err } - // request peers from the peer manager and parse the NodeAddresses into - // URL strings + // Fetch peers from the peer manager, convert NodeAddresses into URL + // strings, and send them back to the caller. nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) for idx, addr := range nodeAddresses { @@ -248,28 +245,24 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) URL: addr.String(), } } - if err := r.pexCh.Send(ctx, p2p.Envelope{ + return 0, r.pexCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &protop2p.PexResponse{Addresses: pexAddresses}, - }); err != nil { - return 0, err - } + }) - return time.Second, nil case *protop2p.PexResponse: - // check if the response matches a request that was made to that peer + // Verify that this response corresponds to one of our pending requests. if err := r.markPeerResponse(envelope.From); err != nil { - return time.Minute, err + return 0, err } - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return 10 * time.Minute, fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) + // Verify that the response does not exceed the safety limit. + if len(msg.Addresses) > maxAddresses { + return 0, fmt.Errorf("peer sent too many addresses (%d > maximum %d)", + len(msg.Addresses), maxAddresses) + } + var numAdded int for _, pexAddress := range msg.Addresses { peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) if err != nil { @@ -278,45 +271,19 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) added, err := r.peerManager.Add(peerAddress) if err != nil { logger.Error("failed to add PEX address", "address", peerAddress, "err", err) + continue } if added { - r.newPeers++ + numAdded++ logger.Debug("added PEX address", "address", peerAddress) } - r.totalPeers++ - } - - return 10 * time.Minute, nil - default: - return time.Second, fmt.Errorf("received unknown message: %T", msg) - } -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. -// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel.
-func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (duration time.Duration, err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) } - }() - r.logger.Debug("received PEX message", "peer", envelope.From) + return r.calculateNextRequestTime(numAdded), nil - switch chID { - case p2p.ChannelID(PexChannel): - duration, err = r.handlePexMessage(ctx, envelope) default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + return 0, fmt.Errorf("received unknown message: %T", msg) } - - return } // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we @@ -338,95 +305,87 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } } -// sendRequestForPeers pops the first peerID off the list and sends the -// peer a request for more peer addresses. The function then moves the -// peer into the requestsSent bucket and calculates when the next request -// time should be -func (r *Reactor) sendRequestForPeers(ctx context.Context) (time.Duration, error) { +// sendRequestForPeers chooses a peer from the set of available peers and sends +// that peer a request for more peer addresses. The chosen peer is moved into +// the requestsSent bucket so that we will not attempt to contact them again +// until they've replied or updated. +func (r *Reactor) sendRequestForPeers(ctx context.Context) error { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { // no peers are available - r.logger.Debug("no available peers to send request to, waiting...") - return noAvailablePeersWaitPeriod, nil + r.logger.Debug("no available peers to send a PEX request to (retrying)") + return nil } - var peerID types.NodeID - // use range to get a random peer. + // Select an arbitrary peer from the available set. + var peerID types.NodeID for peerID = range r.availablePeers { break } - // send out the pex request if err := r.pexCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &protop2p.PexRequest{}, }); err != nil { - return 0, err + return err } - // remove the peer from the abvailable peers list and mark it in the requestsSent map + // Move the peer from available to pending. delete(r.availablePeers, peerID) r.requestsSent[peerID] = struct{}{} - dur := r.calculateNextRequestTime() - r.logger.Debug("peer request sent", "next_request_time", dur) - return dur, nil + return nil } -// calculateNextRequestTime implements something of a proportional controller -// to estimate how often the reactor should be requesting new peer addresses. -// The dependent variable in this calculation is the ratio of new peers to -// all peers that the reactor receives. The interval is thus calculated as the -// inverse squared. In the beginning, all peers should be new peers. -// We expect this ratio to be near 1 and thus the interval to be as short -// as possible. As the node becomes more familiar with the network the ratio of -// new nodes will plummet to a very small number, meaning the interval expands -// to its upper bound. +// calculateNextRequestTime selects how long we should wait before attempting +// to send out another request for peer addresses. +// +// This implements a simplified proportional control mechanism to poll more +// often when our knowledge of the network is incomplete, and less often as our +// knowledge grows. 
To estimate our knowledge of the network, we use the +// fraction of "new" peers (addresses we have not previously seen) to the total +// so far observed. When we first join the network, this fraction will be close +// to 1, meaning most peers we hear about are "new" to us, and as we discover more peers, +// the fraction will go toward zero. // -// CONTRACT: The caller must hold r.mtx exclusively when calling this method. -func (r *Reactor) calculateNextRequestTime() time.Duration { - // check if the peer store is full. If so then there is no need - // to send peer requests too often +// The minimum interval will be minReceiveRequestInterval to ensure we will not +// request from any peer more often than we would allow them to do from us. +func (r *Reactor) calculateNextRequestTime(added int) time.Duration { + r.mtx.Lock() + defer r.mtx.Unlock() + + r.totalPeers += added + + // If the peer store is nearly full, wait the maximum interval. if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { - r.logger.Debug("peer manager near full ratio, sleeping...", + r.logger.Debug("Peer manager is nearly full", "sleep_period", fullCapacityInterval, "ratio", ratio) return fullCapacityInterval } - // baseTime represents the shortest interval that we can send peer requests - // in. For example if we have 10 peers and we can't send a message to the - // same peer every 500ms, then we can send a request every 50ms. In practice - // we use a safety margin of 2, ergo 100ms - peers := tmmath.MinInt(len(r.availablePeers), 50) - baseTime := minReceiveRequestInterval - if peers > 0 { - baseTime = minReceiveRequestInterval * 2 / time.Duration(peers) + // If there are no available peers to query, poll less aggressively. + if len(r.availablePeers) == 0 { + r.logger.Debug("No available peers to send a PEX request", + "sleep_period", noAvailablePeersWaitPeriod) + return noAvailablePeersWaitPeriod } - if r.totalPeers > 0 || r.discoveryRatio == 0 { - // find the ratio of new peers. NOTE: We add 1 to both sides to avoid - // divide by zero problems - ratio := float32(r.totalPeers+1) / float32(r.newPeers+1) - // square the ratio in order to get non linear time intervals - // NOTE: The longest possible interval for a network with 100 or more peers - // where a node is connected to 50 of them is 2 minutes. - r.discoveryRatio = ratio * ratio - r.newPeers = 0 - r.totalPeers = 0 - } - // NOTE: As ratio is always >= 1, discovery ratio is >= 1. Therefore we don't need to worry - // about the next request time being less than the minimum time - return baseTime * time.Duration(r.discoveryRatio) + // Reaching here, there are available peers to query and the peer store + // still has space. Estimate our knowledge of the network from the latest + // update and choose a new interval. + base := float64(minReceiveRequestInterval) / float64(len(r.availablePeers)) + multiplier := float64(r.totalPeers+1) / float64(added+1) // +1 to avoid division by zero + return time.Duration(base*multiplier*multiplier) + minReceiveRequestInterval } func (r *Reactor) markPeerRequest(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { - if time.Now().Before(lastRequestTime.Add(minReceiveRequestInterval)) { - return fmt.Errorf("peer sent a request too close after a prior one.
Minimum interval: %v", - minReceiveRequestInterval) + if d := time.Since(lastRequestTime); d < minReceiveRequestInterval { + return fmt.Errorf("peer %v sent PEX request too soon (%v < minimum %v)", + peer, d, minReceiveRequestInterval) } } r.lastReceivedRequests[peer] = time.Now() diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 4319cad20..356d7f435 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -96,7 +96,7 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { peerErr := <-r.pexErrCh require.Error(t, peerErr.Err) require.Empty(t, r.pexOutCh) - require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one") + require.Contains(t, peerErr.Err.Error(), "sent PEX request too soon") require.Equal(t, badNode, peerErr.NodeID) } diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go deleted file mode 100644 index f30757f45..000000000 --- a/internal/proxy/app_conn.go +++ /dev/null @@ -1,249 +0,0 @@ -package proxy - -import ( - "context" - "time" - - "github.com/go-kit/kit/metrics" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/types" -) - -//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot - -//---------------------------------------------------------------------------------------- -// Enforce which abci msgs can be sent on a connection at the type level - -type AppConnConsensus interface { - Error() error - - InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - - PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) - ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) - ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) - VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) - FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) - Commit(context.Context) (*types.ResponseCommit, error) -} - -type AppConnMempool interface { - Error() error - - CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - - Flush(context.Context) error -} - -type AppConnQuery interface { - Error() error - - Echo(context.Context, string) (*types.ResponseEcho, error) - Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error) -} - -type AppConnSnapshot interface { - Error() error - - ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) -} - -//----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abciclient.Client) - -type appConnConsensus struct { - metrics *Metrics - appConn abciclient.Client -} - -var _ AppConnConsensus = (*appConnConsensus)(nil) - -func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus { - return &appConnConsensus{ - 
metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnConsensus) Error() error { - return app.appConn.Error() -} - -func (app *appConnConsensus) InitChain( - ctx context.Context, - req types.RequestInitChain, -) (*types.ResponseInitChain, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() - return app.appConn.InitChain(ctx, req) -} - -func (app *appConnConsensus) PrepareProposal( - ctx context.Context, - req types.RequestPrepareProposal, -) (*types.ResponsePrepareProposal, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() - return app.appConn.PrepareProposal(ctx, req) -} - -func (app *appConnConsensus) ProcessProposal( - ctx context.Context, - req types.RequestProcessProposal, -) (*types.ResponseProcessProposal, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() - return app.appConn.ProcessProposal(ctx, req) -} - -func (app *appConnConsensus) ExtendVote( - ctx context.Context, - req types.RequestExtendVote, -) (*types.ResponseExtendVote, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() - return app.appConn.ExtendVote(ctx, req) -} - -func (app *appConnConsensus) VerifyVoteExtension( - ctx context.Context, - req types.RequestVerifyVoteExtension, -) (*types.ResponseVerifyVoteExtension, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() - return app.appConn.VerifyVoteExtension(ctx, req) -} - -func (app *appConnConsensus) FinalizeBlock( - ctx context.Context, - req types.RequestFinalizeBlock, -) (*types.ResponseFinalizeBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() - return app.appConn.FinalizeBlock(ctx, req) -} - -func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() - return app.appConn.Commit(ctx) -} - -//------------------------------------------------ -// Implements AppConnMempool (subset of abciclient.Client) - -type appConnMempool struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool { - return &appConnMempool{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnMempool) Error() error { - return app.appConn.Error() -} - -func (app *appConnMempool) Flush(ctx context.Context) error { - defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() - return app.appConn.Flush(ctx) -} - -func (app *appConnMempool) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() - return app.appConn.CheckTx(ctx, req) -} - -//------------------------------------------------ -// Implements AppConnQuery (subset of abciclient.Client) - -type appConnQuery struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery { - return &appConnQuery{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnQuery) Error() error { - return app.appConn.Error() -} - -func (app *appConnQuery) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { - defer 
addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() - return app.appConn.Echo(ctx, msg) -} - -func (app *appConnQuery) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() - return app.appConn.Info(ctx, req) -} - -func (app *appConnQuery) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() - return app.appConn.Query(ctx, reqQuery) -} - -//------------------------------------------------ -// Implements AppConnSnapshot (subset of abciclient.Client) - -type appConnSnapshot struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot { - return &appConnSnapshot{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnSnapshot) Error() error { - return app.appConn.Error() -} - -func (app *appConnSnapshot) ListSnapshots( - ctx context.Context, - req types.RequestListSnapshots, -) (*types.ResponseListSnapshots, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() - return app.appConn.ListSnapshots(ctx, req) -} - -func (app *appConnSnapshot) OfferSnapshot( - ctx context.Context, - req types.RequestOfferSnapshot, -) (*types.ResponseOfferSnapshot, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() - return app.appConn.OfferSnapshot(ctx, req) -} - -func (app *appConnSnapshot) LoadSnapshotChunk( - ctx context.Context, - req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() - return app.appConn.LoadSnapshotChunk(ctx, req) -} - -func (app *appConnSnapshot) ApplySnapshotChunk( - ctx context.Context, - req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() - return app.appConn.ApplySnapshotChunk(ctx, req) -} - -// addTimeSample returns a function that, when called, adds an observation to m. -// The observation added to m is the number of seconds ellapsed since addTimeSample -// was initially called. addTimeSample is meant to be called in a defer to calculate -// the amount of time a function takes to complete. -func addTimeSample(m metrics.Histogram) func() { - start := time.Now() - return func() { m.Observe(time.Since(start).Seconds()) } -} diff --git a/internal/proxy/client.go b/internal/proxy/client.go index d01634bdf..7444c841e 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -1,42 +1,213 @@ package proxy import ( + "context" "io" + "os" + "syscall" + "time" + "github.com/go-kit/kit/metrics" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" e2e "github.com/tendermint/tendermint/test/e2e/app" ) -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client. 
+// ClientFactory returns a client object: a local client if addr is one of +// 'kvstore', 'persistent_kvstore', 'e2e', or 'noop'; otherwise a remote +// client. // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store. -func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) { +func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient.Client, io.Closer, error) { switch addr { case "kvstore": - return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} + return abciclient.NewLocalClient(logger, kvstore.NewApplication()), noopCloser{}, nil case "persistent_kvstore": app := kvstore.NewPersistentKVStoreApplication(logger, dbDir) - return abciclient.NewLocalCreator(app), app + return abciclient.NewLocalClient(logger, app), app, nil case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { - panic(err) + return nil, noopCloser{}, err } - return abciclient.NewLocalCreator(app), noopCloser{} + return abciclient.NewLocalClient(logger, app), noopCloser{}, nil case "noop": - return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} + return abciclient.NewLocalClient(logger, types.NewBaseApplication()), noopCloser{}, nil default: - mustConnect := false // loop retrying - return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{} + const mustConnect = false // loop retrying + client, err := abciclient.NewClient(logger, addr, transport, mustConnect) + if err != nil { + return nil, noopCloser{}, err + } + + return client, noopCloser{}, nil } } type noopCloser struct{} func (noopCloser) Close() error { return nil } + +// proxyClient provides the application connection. +type proxyClient struct { + service.BaseService + logger log.Logger + + client abciclient.Client + metrics *Metrics +} + +// New creates a proxy application interface. +func New(client abciclient.Client, logger log.Logger, metrics *Metrics) abciclient.Client { + conn := &proxyClient{ + logger: logger, + metrics: metrics, + client: client, + } + conn.BaseService = *service.NewBaseService(logger, "proxyClient", conn) + return conn +} + +func (app *proxyClient) OnStop() { tryCallStop(app.client) } +func (app *proxyClient) Error() error { return app.client.Error() } + +func tryCallStop(client abciclient.Client) { + if c, ok := client.(interface{ Stop() }); ok { + c.Stop() + } +} + +func (app *proxyClient) OnStart(ctx context.Context) error { + var err error + defer func() { + if err != nil { + tryCallStop(app.client) + } + }() + + // Kill Tendermint if the ABCI application crashes. + go func() { + if !app.client.IsRunning() { + return + } + app.client.Wait() + if ctx.Err() != nil { + return + } + + if err := app.client.Error(); err != nil { + app.logger.Error("client connection terminated. Did the application crash?
Please restart tendermint", + "err", err) + + if killErr := kill(); killErr != nil { + app.logger.Error("Failed to kill this process - please do so manually", + "err", killErr) + } + } + + }() + + return app.client.Start(ctx) +} + +func kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + + return p.Signal(syscall.SIGABRT) +} + +func (app *proxyClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() + return app.client.InitChain(ctx, req) +} + +func (app *proxyClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() + return app.client.PrepareProposal(ctx, req) +} + +func (app *proxyClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() + return app.client.ProcessProposal(ctx, req) +} + +func (app *proxyClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() + return app.client.ExtendVote(ctx, req) +} + +func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() + return app.client.VerifyVoteExtension(ctx, req) +} + +func (app *proxyClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() + return app.client.FinalizeBlock(ctx, req) +} + +func (app *proxyClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() + return app.client.Commit(ctx) +} + +func (app *proxyClient) Flush(ctx context.Context) error { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() + return app.client.Flush(ctx) +} + +func (app *proxyClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() + return app.client.CheckTx(ctx, req) +} + +func (app *proxyClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() + return app.client.Echo(ctx, msg) +} + +func (app *proxyClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() + return app.client.Info(ctx, req) +} + +func (app *proxyClient) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() + return app.client.Query(ctx, reqQuery) +} + +func (app *proxyClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + 
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() + return app.client.ListSnapshots(ctx, req) +} + +func (app *proxyClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() + return app.client.OfferSnapshot(ctx, req) +} + +func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() + return app.client.LoadSnapshotChunk(ctx, req) +} + +func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() + return app.client.ApplySnapshotChunk(ctx, req) +} + +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete. +func addTimeSample(m metrics.Histogram) func() { + start := time.Now() + return func() { m.Observe(time.Since(start).Seconds()) } +} diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/client_test.go similarity index 60% rename from internal/proxy/app_conn_test.go rename to internal/proxy/client_test.go index 22f519657..ca32b99e8 100644 --- a/internal/proxy/app_conn_test.go +++ b/internal/proxy/client_test.go @@ -2,18 +2,26 @@ package proxy import ( "context" + "errors" "fmt" + "os" + "os/signal" "strings" + "syscall" "testing" + "time" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abciclient "github.com/tendermint/tendermint/abci/client" + abcimocks "github.com/tendermint/tendermint/abci/client/mocks" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "gotest.tools/assert" ) //---------------------------------------- @@ -51,7 +59,10 @@ var SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) logger := log.TestingLogger() - clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + t.Fatal(err) + } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -62,12 +73,9 @@ func TestEcho(t *testing.T) { t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator(logger.With("module", "abci-client")) - require.NoError(t, err, "Error creating ABCI client:") - - require.NoError(t, cli.Start(ctx), "Error starting ABCI client") + require.NoError(t, client.Start(ctx), "Error starting ABCI client") - proxy := newAppConnTest(cli) + proxy := newAppConnTest(client) t.Log("Connected") for i := 0; i < 1000; i++ { @@ -91,7 +99,10 @@ func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) logger := log.TestingLogger() - clientCreator := abciclient.NewRemoteCreator(logger, sockPath,
SOCKET, true) + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + b.Fatal(err) + } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -102,12 +113,9 @@ func BenchmarkEcho(b *testing.B) { b.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator(logger.With("module", "abci-client")) - require.NoError(b, err, "Error creating ABCI client") - - require.NoError(b, cli.Start(ctx), "Error starting ABCI client") + require.NoError(b, client.Start(ctx), "Error starting ABCI client") - proxy := newAppConnTest(cli) + proxy := newAppConnTest(client) b.Log("Connected") echoString := strings.Repeat(" ", 200) b.StartTimer() // Start benchmarking tests @@ -139,7 +147,10 @@ func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) logger := log.TestingLogger() - clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + t.Fatal(err) + } // Start server s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) @@ -147,12 +158,9 @@ func TestInfo(t *testing.T) { t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator(logger.With("module", "abci-client")) - require.NoError(t, err, "Error creating ABCI client") - - require.NoError(t, cli.Start(ctx), "Error starting ABCI client") + require.NoError(t, client.Start(ctx), "Error starting ABCI client") - proxy := newAppConnTest(cli) + proxy := newAppConnTest(client) t.Log("Connected") resInfo, err := proxy.Info(ctx, RequestInfo) @@ -162,3 +170,65 @@ func TestInfo(t *testing.T) { t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") } } + +type noopStoppableClientImpl struct { + abciclient.Client + count int +} + +func (c *noopStoppableClientImpl) Stop() { c.count++ } + +func TestAppConns_Start_Stop(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientMock := &abcimocks.Client{} + clientMock.On("Start", mock.Anything).Return(nil) + clientMock.On("Error").Return(nil) + clientMock.On("IsRunning").Return(true) + clientMock.On("Wait").Return(nil).Times(1) + cl := &noopStoppableClientImpl{Client: clientMock} + + appConns := New(cl, log.TestingLogger(), NopMetrics()) + + err := appConns.Start(ctx) + require.NoError(t, err) + + time.Sleep(200 * time.Millisecond) + + cancel() + appConns.Wait() + + clientMock.AssertExpectations(t) + assert.Equal(t, 1, cl.count) +} + +// Upon failure, we expect the proxy client to kill the process +func TestAppConns_Failure(t *testing.T) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGABRT) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + clientMock := &abcimocks.Client{} + clientMock.On("SetLogger", mock.Anything).Return() + clientMock.On("Start", mock.Anything).Return(nil) + clientMock.On("IsRunning").Return(true) + clientMock.On("Wait").Return(nil) + clientMock.On("Error").Return(errors.New("EOF")) + cl := &noopStoppableClientImpl{Client: clientMock} + + appConns := New(cl, log.TestingLogger(), NopMetrics()) + + err := appConns.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { cancel(); appConns.Wait() }) + + select { + case sig := <-c: + t.Logf("signal %q successfully received", sig) + case <-ctx.Done(): + t.Fatal("expected process to receive termination signal") + } +} diff --git
a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go deleted file mode 100644 index 61e9c9ff2..000000000 --- a/internal/proxy/multi_app_conn.go +++ /dev/null @@ -1,131 +0,0 @@ -package proxy - -import ( - "context" - "os" - "syscall" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" -) - -// AppConns is the Tendermint's interface to the application that consists of -// multiple connections. -type AppConns interface { - service.Service - - // Mempool connection - Mempool() AppConnMempool - // Consensus connection - Consensus() AppConnConsensus - // Query connection - Query() AppConnQuery - // Snapshot connection - Snapshot() AppConnSnapshot -} - -// NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { - return NewMultiAppConn(clientCreator, logger, metrics) -} - -// multiAppConn implements AppConns. -// -// A multiAppConn is made of a few appConns and manages their underlying abci -// clients. -// TODO: on app restart, clients must reboot together -type multiAppConn struct { - service.BaseService - logger log.Logger - - metrics *Metrics - consensusConn AppConnConsensus - mempoolConn AppConnMempool - queryConn AppConnQuery - snapshotConn AppConnSnapshot - - client stoppableClient - - clientCreator abciclient.Creator -} - -// TODO: this is a totally internal and quasi permanent shim for -// clients. eventually we can have a single client and have some kind -// of reasonable lifecycle witout needing an explicit stop method. -type stoppableClient interface { - abciclient.Client - Stop() -} - -// NewMultiAppConn makes all necessary abci connections to the application. -func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { - multiAppConn := &multiAppConn{ - logger: logger, - metrics: metrics, - clientCreator: clientCreator, - } - multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn) - return multiAppConn -} - -func (app *multiAppConn) Mempool() AppConnMempool { return app.mempoolConn } -func (app *multiAppConn) Consensus() AppConnConsensus { return app.consensusConn } -func (app *multiAppConn) Query() AppConnQuery { return app.queryConn } -func (app *multiAppConn) Snapshot() AppConnSnapshot { return app.snapshotConn } - -func (app *multiAppConn) OnStart(ctx context.Context) error { - var err error - defer func() { - if err != nil { - app.client.Stop() - } - }() - - var client abciclient.Client - client, err = app.clientCreator(app.logger) - if err != nil { - return err - } - - app.queryConn = NewAppConnQuery(client, app.metrics) - app.snapshotConn = NewAppConnSnapshot(client, app.metrics) - app.mempoolConn = NewAppConnMempool(client, app.metrics) - app.consensusConn = NewAppConnConsensus(client, app.metrics) - - app.client = client.(stoppableClient) - - // Kill Tendermint if the ABCI application crashes. - go func() { - if !client.IsRunning() { - return - } - app.client.Wait() - if ctx.Err() != nil { - return - } - - if err := app.client.Error(); err != nil { - app.logger.Error("client connection terminated. Did the application crash? 
Please restart tendermint", - "err", err) - if killErr := kill(); killErr != nil { - app.logger.Error("Failed to kill this process - please do so manually", - "err", killErr) - } - } - - }() - - return client.Start(ctx) -} - -func (app *multiAppConn) OnStop() { app.client.Stop() } - -func kill() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - - return p.Signal(syscall.SIGTERM) -} diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go deleted file mode 100644 index efbb3f56f..000000000 --- a/internal/proxy/multi_app_conn_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package proxy - -import ( - "context" - "errors" - "os" - "os/signal" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - abcimocks "github.com/tendermint/tendermint/abci/client/mocks" - "github.com/tendermint/tendermint/libs/log" -) - -type noopStoppableClientImpl struct { - abciclient.Client - count int -} - -func (c *noopStoppableClientImpl) Stop() { c.count++ } - -func TestAppConns_Start_Stop(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - clientMock := &abcimocks.Client{} - clientMock.On("Start", mock.Anything).Return(nil) - clientMock.On("Error").Return(nil) - clientMock.On("IsRunning").Return(true) - clientMock.On("Wait").Return(nil).Times(1) - cl := &noopStoppableClientImpl{Client: clientMock} - - creatorCallCount := 0 - creator := func(logger log.Logger) (abciclient.Client, error) { - creatorCallCount++ - return cl, nil - } - - appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - - err := appConns.Start(ctx) - require.NoError(t, err) - - time.Sleep(200 * time.Millisecond) - - cancel() - appConns.Wait() - - clientMock.AssertExpectations(t) - assert.Equal(t, 1, cl.count) - assert.Equal(t, 1, creatorCallCount) -} - -// Upon failure, we call tmos.Kill -func TestAppConns_Failure(t *testing.T) { - ok := make(chan struct{}) - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM) - go func() { - for range c { - close(ok) - return - } - }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return() - clientMock.On("Start", mock.Anything).Return(nil) - clientMock.On("IsRunning").Return(true) - clientMock.On("Wait").Return(nil) - clientMock.On("Error").Return(errors.New("EOF")) - cl := &noopStoppableClientImpl{Client: clientMock} - - creator := func(log.Logger) (abciclient.Client, error) { - return cl, nil - } - - appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - - err := appConns.Start(ctx) - require.NoError(t, err) - t.Cleanup(func() { cancel(); appConns.Wait() }) - - select { - case <-ok: - t.Log("SIGTERM successfully received") - case <-time.After(5 * time.Second): - t.Fatal("expected process to receive SIGTERM signal") - } -} diff --git a/internal/pubsub/pubsub.go b/internal/pubsub/pubsub.go index 707f9cb13..df2dd90e3 100644 --- a/internal/pubsub/pubsub.go +++ b/internal/pubsub/pubsub.go @@ -153,26 +153,6 @@ func BufferCapacity(cap int) Option { // BufferCapacity returns capacity of the publication queue. func (s *Server) BufferCapacity() int { return cap(s.queue) } -// Subscribe creates a subscription for the given client ID and query. 
-// If len(capacities) > 0, its first value is used as the queue capacity. -// -// Deprecated: Use SubscribeWithArgs. This method will be removed in v0.36. -func (s *Server) Subscribe(ctx context.Context, clientID string, query *query.Query, capacities ...int) (*Subscription, error) { - args := SubscribeArgs{ - ClientID: clientID, - Query: query, - Limit: 1, - } - if len(capacities) > 0 { - args.Limit = capacities[0] - if len(capacities) > 1 { - args.Quota = capacities[1] - } - // bounds are checked below - } - return s.SubscribeWithArgs(ctx, args) -} - // Observe registers an observer function that will be called synchronously // with each published message matching any of the given queries, prior to it // being forwarded to any subscriber. If no queries are specified, all diff --git a/internal/pubsub/query/syntax/syntax_test.go b/internal/pubsub/query/syntax/syntax_test.go index 9ce5fa735..ac0473beb 100644 --- a/internal/pubsub/query/syntax/syntax_test.go +++ b/internal/pubsub/query/syntax/syntax_test.go @@ -55,7 +55,7 @@ func TestScanner(t *testing.T) { got = append(got, s.Token()) } if err := s.Err(); err != io.EOF { - t.Errorf("Next: unexpected error: %w", err) + t.Errorf("Next: unexpected error: %v", err) } if !reflect.DeepEqual(got, test.want) { diff --git a/internal/rpc/core/abci.go b/internal/rpc/core/abci.go index cbd27a09d..8f5e61d55 100644 --- a/internal/rpc/core/abci.go +++ b/internal/rpc/core/abci.go @@ -18,7 +18,7 @@ func (env *Environment) ABCIQuery( height int64, prove bool, ) (*coretypes.ResultABCIQuery, error) { - resQuery, err := env.ProxyAppQuery.Query(ctx, abci.RequestQuery{ + resQuery, err := env.ProxyApp.Query(ctx, abci.RequestQuery{ Path: path, Data: data, Height: height, @@ -34,7 +34,7 @@ func (env *Environment) ABCIQuery( // ABCIInfo gets some info about the application. // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info func (env *Environment) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - resInfo, err := env.ProxyAppQuery.Info(ctx, proxy.RequestInfo) + resInfo, err := env.ProxyApp.Info(ctx, proxy.RequestInfo) if err != nil { return nil, err } diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 6258dc060..26044aef7 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -23,9 +23,7 @@ import ( // order (highest first). // // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain -func (env *Environment) BlockchainInfo( - ctx context.Context, - minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { +func (env *Environment) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -193,8 +191,6 @@ func (env *Environment) Commit(ctx context.Context, heightPtr *int64) (*coretype // If no height is provided, it will fetch results for the latest block. // // Results are for the height of the block containing the txs. 
-// Thus response.results.deliver_tx[5] is the results of executing -// getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) @@ -208,13 +204,13 @@ func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*co } var totalGasUsed int64 - for _, tx := range results.FinalizeBlock.GetTxs() { - totalGasUsed += tx.GetGasUsed() + for _, res := range results.FinalizeBlock.GetTxResults() { + totalGasUsed += res.GetGasUsed() } return &coretypes.ResultBlockResults{ Height: height, - TxsResults: results.FinalizeBlock.Txs, + TxsResults: results.FinalizeBlock.TxResults, TotalGasUsed: totalGasUsed, FinalizeBlockEvents: results.FinalizeBlock.Events, ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, @@ -222,8 +218,8 @@ func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*co }, nil } -// BlockSearch searches for a paginated set of blocks matching BeginBlock and -// EndBlock event search criteria. +// BlockSearch searches for a paginated set of blocks matching the provided +// query. func (env *Environment) BlockSearch( ctx context.Context, query string, diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index 4baff9d38..c48ac4c48 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -72,7 +72,7 @@ func TestBlockchainInfo(t *testing.T) { func TestBlockResults(t *testing.T) { results := &tmstate.ABCIResponses{ FinalizeBlock: &abci.ResponseFinalizeBlock{ - Txs: []*abci.ResponseDeliverTx{ + TxResults: []*abci.ExecTxResult{ {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, {Code: 1, Log: "not ok", GasUsed: 0}, @@ -99,7 +99,7 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, &coretypes.ResultBlockResults{ Height: 100, - TxsResults: results.FinalizeBlock.Txs, + TxsResults: results.FinalizeBlock.TxResults, TotalGasUsed: 15, FinalizeBlockEvents: results.FinalizeBlock.Events, ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index 6acdcc333..b30209b38 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -14,10 +14,7 @@ import ( // for the validators in the set as used in computing their Merkle root. // // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func (env *Environment) Validators( - ctx context.Context, - heightPtr *int64, - pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) { +func (env *Environment) Validators(ctx context.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) @@ -86,7 +83,8 @@ func (env *Environment) DumpConsensusState(ctx context.Context) (*coretypes.Resu } return &coretypes.ResultDumpConsensusState{ RoundState: roundState, - Peers: peerStates}, nil + Peers: peerStates, + }, nil } // ConsensusState returns a concise summary of the consensus state. 
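Note on the BlockResults change above: ResponseFinalizeBlock now carries TxResults ([]*abci.ExecTxResult) in place of the old Txs ([]*abci.ResponseDeliverTx), and the handler sums gas over the new slice. A minimal sketch of how the renamed accessors compose, using only the getters visible in this diff (GetTxResults, GetGasUsed); the main wrapper is illustrative:

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// totalGas mirrors the patched BlockResults handler: gas is summed over
// FinalizeBlock.TxResults rather than the removed Txs field.
func totalGas(fb *abci.ResponseFinalizeBlock) int64 {
	var total int64
	for _, res := range fb.GetTxResults() {
		total += res.GetGasUsed()
	}
	return total
}

func main() {
	fb := &abci.ResponseFinalizeBlock{
		TxResults: []*abci.ExecTxResult{{GasUsed: 10}, {GasUsed: 5}},
	}
	fmt.Println(totalGas(fb)) // prints 15
}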
diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 5a718b232..24f43a4a7 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -11,6 +11,7 @@ import ( "github.com/rs/cors" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" @@ -19,7 +20,6 @@ import ( "github.com/tendermint/tendermint/internal/eventlog" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/proxy" tmpubsub "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/internal/pubsub/query" sm "github.com/tendermint/tendermint/internal/state" @@ -57,12 +57,6 @@ type consensusState interface { GetRoundStateSimpleJSON() ([]byte, error) } -type transport interface { - Listeners() []string - IsListening() bool - NodeInfo() types.NodeInfo -} - type peerManager interface { Peers() []types.NodeID Addresses(types.NodeID) []p2p.NodeAddress @@ -73,8 +67,7 @@ type peerManager interface { // to be setup once during startup. type Environment struct { // external, thread safe interfaces - ProxyAppQuery proxy.AppConnQuery - ProxyAppMempool proxy.AppConnMempool + ProxyApp abciclient.Client // interfaces defined in types and above StateStore sm.Store @@ -84,8 +77,9 @@ type Environment struct { ConsensusReactor *consensus.Reactor BlockSyncReactor *blocksync.Reactor - // Legacy p2p stack - P2PTransport transport + IsListening bool + Listeners []string + NodeInfo types.NodeInfo // interfaces for new p2p interfaces PeerManager peerManager @@ -226,6 +220,10 @@ func (env *Environment) StartService(ctx context.Context, conf *config.Config) ( return nil, err } + env.Listeners = []string{ + fmt.Sprintf("Listener(@%v)", conf.P2P.ExternalAddress), + } + listenAddrs := strings.SplitAndTrimEmpty(conf.RPC.ListenAddress, ",", " ") routes := NewRoutesMap(env, &RouteOptions{ Unsafe: conf.RPC.Unsafe, diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index bc21fadc6..4e0d2ac8a 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -165,8 +165,11 @@ func (env *Environment) Events(ctx context.Context, maxItems = 100 } + const minWaitTime = 1 * time.Second const maxWaitTime = 30 * time.Second - if waitTime > maxWaitTime { + if waitTime < minWaitTime { + waitTime = minWaitTime + } else if waitTime > maxWaitTime { waitTime = maxWaitTime } @@ -185,7 +188,7 @@ func (env *Environment) Events(ctx context.Context, accept := func(itm *eventlog.Item) error { // N.B. We accept up to one item more than requested, so we can tell how // to set the "more" flag in the response. 
- if len(items) > maxItems { + if len(items) > maxItems || itm.Cursor.Before(after) { return eventlog.ErrStopScan } if cursorInRange(itm.Cursor, before, after) && query.Matches(itm.Events) { @@ -194,7 +197,7 @@ func (env *Environment) Events(ctx context.Context, return nil } - if waitTime > 0 && before.IsZero() { + if before.IsZero() { ctx, cancel := context.WithTimeout(ctx, waitTime) defer cancel() diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 61d36e93a..c2a9084db 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -114,10 +114,10 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*co } return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *r, - DeliverTx: txres.TxResult, - Hash: tx.Hash(), - Height: txres.Height, + CheckTx: *r, + TxResult: txres.TxResult, + Hash: tx.Hash(), + Height: txres.Height, }, nil } } @@ -158,7 +158,7 @@ func (env *Environment) NumUnconfirmedTxs(ctx context.Context) (*coretypes.Resul // be added to the mempool either. // More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx func (env *Environment) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTx(ctx, abci.RequestCheckTx{Tx: tx}) + res, err := env.ProxyApp.CheckTx(ctx, abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err } diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index 3cead393c..5444b77b7 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -27,8 +27,8 @@ func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, } return &coretypes.ResultNetInfo{ - Listening: env.P2PTransport.IsListening(), - Listeners: env.P2PTransport.Listeners(), + Listening: env.IsListening, + Listeners: env.Listeners, NPeers: len(peers), Peers: peers, }, nil diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index 2f648978a..46b8a6fcd 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -66,7 +66,7 @@ func (env *Environment) Status(ctx context.Context) (*coretypes.ResultStatus, er } result := &coretypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo(), + NodeInfo: env.NodeInfo, ApplicationInfo: applicationInfo, SyncInfo: coretypes.SyncInfo{ LatestBlockHash: latestBlockHash, diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 126875d0d..73fa6d2c8 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -36,19 +36,16 @@ func (env *Environment) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) return nil, fmt.Errorf("tx (%X) not found, err: %w", hash, err) } - height := r.Height - index := r.Index - var proof types.TxProof if prove { - block := env.BlockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + block := env.BlockStore.LoadBlock(r.Height) + proof = block.Data.Txs.Proof(int(r.Index)) } return &coretypes.ResultTx{ Hash: hash, - Height: height, - Index: index, + Height: r.Height, + Index: r.Index, TxResult: r.Result, Tx: r.Tx, Proof: proof, @@ -127,7 +124,7 @@ func (env *Environment) TxSearch( var proof types.TxProof if prove { block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines + proof = block.Data.Txs.Proof(int(r.Index)) } apiResults = append(apiResults, &coretypes.ResultTx{ diff --git a/internal/state/execution.go b/internal/state/execution.go index cdd6e009b..3423b00f6 100644 --- 
a/internal/state/execution.go +++ b/internal/state/execution.go @@ -2,17 +2,18 @@ package state import ( "context" - "errors" "fmt" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmtypes "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -30,7 +31,7 @@ type BlockExecutor struct { blockStore BlockStore // execute the app against this - proxyApp proxy.AppConnConsensus + appClient abciclient.Client // events eventBus types.BlockEventPublisher @@ -60,16 +61,17 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { func NewBlockExecutor( stateStore Store, logger log.Logger, - proxyApp proxy.AppConnConsensus, + appClient abciclient.Client, pool mempool.Mempool, evpool EvidencePool, blockStore BlockStore, + eventBus *eventbus.EventBus, options ...BlockExecutorOption, ) *BlockExecutor { res := &BlockExecutor{ + eventBus: eventBus, store: stateStore, - proxyApp: proxyApp, - eventBus: eventbus.NopEventBus{}, + appClient: appClient, mempool: pool, evpool: evpool, logger: logger, @@ -89,12 +91,6 @@ func (blockExec *BlockExecutor) Store() Store { return blockExec.store } -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { - blockExec.eventBus = eventBus -} - // CreateProposalBlock calls state.MakeBlock with evidence from the evpool // and txs from the mempool. The max bytes must be big enough to fit the commit. // Up to 1/10th of the block space is allocated for maximum sized evidence. @@ -104,10 +100,11 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) func (blockExec *BlockExecutor) CreateProposalBlock( ctx context.Context, height int64, - state State, commit *types.Commit, + state State, + commit *types.Commit, proposerAddr []byte, votes []*types.Vote, -) (*types.Block, *types.PartSet, error) { +) (*types.Block, error) { maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas @@ -118,13 +115,18 @@ func (blockExec *BlockExecutor) CreateProposalBlock( maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + block := state.MakeBlock(height, txs, commit, evidence, proposerAddr) - preparedProposal, err := blockExec.proxyApp.PrepareProposal( + localLastCommit := buildLastCommitInfo(block, blockExec.store, state.InitialHeight) + rpp, err := blockExec.appClient.PrepareProposal( ctx, abci.RequestPrepareProposal{ - BlockData: txs.ToSliceOfBytes(), - BlockDataSize: maxDataBytes, - Votes: types.VotesToProto(votes), + Hash: block.Hash(), + Header: *block.Header.ToProto(), + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: extendedCommitInfo(localLastCommit, votes), + ByzantineValidators: block.Evidence.ToABCI(), + MaxTxBytes: maxDataBytes, }, ) if err != nil { @@ -138,19 +140,28 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // purpose for now.
panic(err) } - newTxs := preparedProposal.GetBlockData() - var txSize int - for _, tx := range newTxs { - txSize += len(tx) - if maxDataBytes < int64(txSize) { - panic("block data exceeds max amount of allowed bytes") - } + if !rpp.ModifiedTx { + return block, nil } + txrSet := types.NewTxRecordSet(rpp.TxRecords) - modifiedTxs := types.ToTxs(preparedProposal.GetBlockData()) + if err := txrSet.Validate(maxDataBytes, block.Txs); err != nil { + return nil, err + } - return state.MakeBlock(height, modifiedTxs, commit, evidence, proposerAddr) + for _, rtx := range txrSet.RemovedTxs() { + if err := blockExec.mempool.RemoveTxByKey(rtx.Key()); err != nil { + blockExec.logger.Debug("error removing transaction from the mempool", "error", err, "tx hash", rtx.Hash()) + } + } + for _, atx := range txrSet.AddedTxs() { + if err := blockExec.mempool.CheckTx(ctx, atx, nil, mempool.TxInfo{}); err != nil { + blockExec.logger.Error("error adding tx to the mempool", "error", err, "tx hash", atx.Hash()) + } + } + itxs := txrSet.IncludedTxs() + return state.MakeBlock(height, itxs, commit, evidence, proposerAddr), nil } func (blockExec *BlockExecutor) ProcessProposal( @@ -162,11 +173,11 @@ func (blockExec *BlockExecutor) ProcessProposal( Hash: block.Header.Hash(), Header: *block.Header.ToProto(), Txs: block.Data.Txs.ToSliceOfBytes(), - LastCommitInfo: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), ByzantineValidators: block.Evidence.ToABCI(), } - resp, err := blockExec.proxyApp.ProcessProposal(ctx, req) + resp, err := blockExec.appClient.ProcessProposal(ctx, req) if err != nil { return false, ErrInvalidBlock(err) } @@ -207,18 +218,22 @@ func (blockExec *BlockExecutor) ValidateBlock(ctx context.Context, state State, func (blockExec *BlockExecutor) ApplyBlock( ctx context.Context, state State, - blockID types.BlockID, - block *types.Block, -) (State, error) { - + blockID types.BlockID, block *types.Block) (State, error) { // validate the block if we haven't already if err := blockExec.ValidateBlock(ctx, state, block); err != nil { return state, ErrInvalidBlock(err) } - startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp(ctx, - blockExec.logger, blockExec.proxyApp, block, blockExec.store, state.InitialHeight, + pbh := block.Header.ToProto() + finalizeBlockResponse, err := blockExec.appClient.FinalizeBlock( + ctx, + abci.RequestFinalizeBlock{ + Hash: block.Hash(), + Header: *pbh, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + ByzantineValidators: block.Evidence.ToABCI(), + }, ) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) @@ -226,19 +241,22 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrProxyAppConn(err) } + abciResponses := &tmstate.ABCIResponses{ + FinalizeBlock: finalizeBlockResponse, + } + // Save the results before we commit. 
if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { return state, err } // validate the validator updates and convert to tendermint types - abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates - err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) + err = validateValidatorUpdates(finalizeBlockResponse.ValidatorUpdates, state.ConsensusParams.Validator) if err != nil { return state, fmt.Errorf("error in validator updates: %w", err) } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(finalizeBlockResponse.ValidatorUpdates) if err != nil { return state, err } @@ -247,13 +265,18 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Update the state with the block and responses. - state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(finalizeBlockResponse.TxResults) + if err != nil { + return state, fmt.Errorf("marshaling TxResults: %w", err) + } + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(blockID, &block.Header, h, finalizeBlockResponse.ConsensusParamUpdates, validatorUpdates) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempool. - appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.FinalizeBlock.Txs) + appHash, retainHeight, err := blockExec.Commit(ctx, state, block, finalizeBlockResponse.TxResults) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } @@ -282,7 +305,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events won't be fired during replay - fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates) + fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates) return state, nil } @@ -292,7 +315,7 @@ func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote Vote: vote.ToProto(), } - resp, err := blockExec.proxyApp.ExtendVote(ctx, req) + resp, err := blockExec.appClient.ExtendVote(ctx, req) if err != nil { return types.VoteExtension{}, err } @@ -304,7 +327,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t Vote: vote.ToProto(), } - resp, err := blockExec.proxyApp.VerifyVoteExtension(ctx, req) + resp, err := blockExec.appClient.VerifyVoteExtension(ctx, req) if err != nil { return err } @@ -326,7 +349,7 @@ func (blockExec *BlockExecutor) Commit( ctx context.Context, state State, block *types.Block, - deliverTxResponses []*abci.ResponseDeliverTx, + txResults []*abci.ExecTxResult, ) ([]byte, int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -340,7 +363,7 @@ func (blockExec *BlockExecutor) Commit( } // Commit block, get hash back - res, err := blockExec.proxyApp.Commit(ctx) + res, err := blockExec.appClient.Commit(ctx) if err != nil { blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, 0, err @@ -359,63 +382,19 @@ func (blockExec *BlockExecutor) Commit( ctx, block.Height, block.Txs, - deliverTxResponses, - TxPreCheck(state), - TxPostCheck(state), + txResults, + TxPreCheckForState(state), + TxPostCheckForState(state), ) return res.Data, res.RetainHeight, err }
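With execBlockOnProxyApp removed in the hunk that follows, ApplyBlock above derives LastResultsHash itself: each ExecTxResult is protobuf-marshaled and the resulting byte slices are Merkle-hashed before state.Update stores the root. A hedged sketch of that step in isolation, assuming only the two calls that appear in this diff (abci.MarshalTxResults, merkle.HashFromByteSlices):

package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

// resultsHash computes the value ApplyBlock passes to state.Update as
// resultsHash, following the same two steps used after FinalizeBlock.
func resultsHash(txResults []*abci.ExecTxResult) ([]byte, error) {
	rs, err := abci.MarshalTxResults(txResults) // one marshaled blob per tx result
	if err != nil {
		return nil, err
	}
	return merkle.HashFromByteSlices(rs), nil // Merkle root over the blobs
}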
-//--------------------------------------------------------- -// Helper functions for executing blocks and updating state - -// Executes block's transactions on proxyAppConn. -// Returns a list of transaction results and updates to the validator set -func execBlockOnProxyApp( - ctx context.Context, - logger log.Logger, - proxyAppConn proxy.AppConnConsensus, - block *types.Block, - store Store, - initialHeight int64, -) (*tmstate.ABCIResponses, error) { - abciResponses := new(tmstate.ABCIResponses) - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} - dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) - abciResponses.FinalizeBlock.Txs = dtxs - - // Begin block - var err error - pbh := block.Header.ToProto() - if pbh == nil { - return nil, errors.New("nil header") - } - - abciResponses.FinalizeBlock, err = proxyAppConn.FinalizeBlock( - ctx, - abci.RequestFinalizeBlock{ - Txs: block.Txs.ToSliceOfBytes(), - Hash: block.Hash(), - Header: *pbh, - Height: block.Height, - LastCommitInfo: buildLastCommitInfo(block, store, initialHeight), - ByzantineValidators: block.Evidence.ToABCI(), - }, - ) - if err != nil { - logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) - return nil, err - } - logger.Info("executed block", "height", block.Height) - return abciResponses, nil -} - -func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.LastCommitInfo { +func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { if block.Height == initialHeight { // there is no last commit for the initial height. // return an empty value. - return abci.LastCommitInfo{} + return abci.CommitInfo{} } lastValSet, err := store.LoadValidators(block.Height - 1) @@ -446,12 +425,30 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a } } - return abci.LastCommitInfo{ + return abci.CommitInfo{ Round: block.LastCommit.Round, Votes: votes, } } +func extendedCommitInfo(c abci.CommitInfo, votes []*types.Vote) abci.ExtendedCommitInfo { + vs := make([]abci.ExtendedVoteInfo, len(c.Votes)) + for i := range vs { + vs[i] = abci.ExtendedVoteInfo{ + Validator: c.Votes[i].Validator, + SignedLastBlock: c.Votes[i].SignedLastBlock, + /* + TODO: Include vote extensions information when implementing vote extensions. + VoteExtension: []byte{}, + */ + } + } + return abci.ExtendedCommitInfo{ + Round: c.Round, + Votes: vs, + } +} + func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { for _, valUpdate := range abciUpdates { @@ -477,16 +474,16 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, return nil } -// updateState returns a new State updated according to the header and responses. -func updateState( - state State, +// Update returns a copy of state with the fields set using the arguments passed in. +func (state State) Update( blockID types.BlockID, header *types.Header, - abciResponses *tmstate.ABCIResponses, + resultsHash []byte, + consensusParamUpdates *tmtypes.ConsensusParams, validatorUpdates []*types.Validator, ) (State, error) { - // Copy the valset so we can apply changes from EndBlock + // Copy the valset so we can apply changes from FinalizeBlock // and update s.LastValidators and s.Validators. nValSet := state.NextValidators.Copy() @@ -507,9 +504,9 @@ func updateState( // Update the params with the latest abciResponses.
nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged - if abciResponses.FinalizeBlock.ConsensusParamUpdates != nil { - // NOTE: must not mutate s.ConsensusParams - nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.FinalizeBlock.ConsensusParamUpdates) + if consensusParamUpdates != nil { + // NOTE: must not mutate state.ConsensusParams + nextParams = state.ConsensusParams.UpdateConsensusParams(consensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { return state, fmt.Errorf("error updating consensus params: %w", err) @@ -538,7 +535,7 @@ func updateState( LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, LastHeightConsensusParamsChanged: lastHeightParamsChanged, - LastResultsHash: ABCIResponsesResultsHash(abciResponses), + LastResultsHash: resultsHash, AppHash: nil, }, nil } @@ -552,13 +549,13 @@ func fireEvents( eventBus types.BlockEventPublisher, block *types.Block, blockID types.BlockID, - abciResponses *tmstate.ABCIResponses, + finalizeBlockResponse *abci.ResponseFinalizeBlock, validatorUpdates []*types.Validator, ) { if err := eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{ Block: block, BlockID: blockID, - ResultFinalizeBlock: *abciResponses.FinalizeBlock, + ResultFinalizeBlock: *finalizeBlockResponse, }); err != nil { logger.Error("failed publishing new block", "err", err) } @@ -566,7 +563,7 @@ func fireEvents( if err := eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ Header: block.Header, NumTxs: int64(len(block.Txs)), - ResultFinalizeBlock: *abciResponses.FinalizeBlock, + ResultFinalizeBlock: *finalizeBlockResponse, }); err != nil { logger.Error("failed publishing new block header", "err", err) } @@ -583,9 +580,9 @@ func fireEvents( } // sanity check - if len(abciResponses.FinalizeBlock.Txs) != len(block.Data.Txs) { + if len(finalizeBlockResponse.TxResults) != len(block.Data.Txs) { panic(fmt.Sprintf("number of TXs (%d) and ABCI TX responses (%d) do not match", - len(block.Data.Txs), len(abciResponses.FinalizeBlock.Txs))) + len(block.Data.Txs), len(finalizeBlockResponse.TxResults))) } for i, tx := range block.Data.Txs { @@ -594,14 +591,14 @@ func fireEvents( Height: block.Height, Index: uint32(i), Tx: tx, - Result: *(abciResponses.FinalizeBlock.Txs[i]), + Result: *(finalizeBlockResponse.TxResults[i]), }, }); err != nil { logger.Error("failed publishing event TX", "err", err) } } - if len(validatorUpdates) > 0 { + if len(finalizeBlockResponse.ValidatorUpdates) > 0 { if err := eventBus.PublishEventValidatorSetUpdates(ctx, types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil { logger.Error("failed publishing event", "err", err) @@ -617,30 +614,41 @@ func fireEvents( func ExecCommitBlock( ctx context.Context, be *BlockExecutor, - appConnConsensus proxy.AppConnConsensus, + appConn abciclient.Client, block *types.Block, logger log.Logger, store Store, initialHeight int64, s State, ) ([]byte, error) { - abciResponses, err := execBlockOnProxyApp(ctx, logger, appConnConsensus, block, store, initialHeight) + pbh := block.Header.ToProto() + finalizeBlockResponse, err := appConn.FinalizeBlock( + ctx, + abci.RequestFinalizeBlock{ + Hash: block.Hash(), + Header: *pbh, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), + ByzantineValidators: block.Evidence.ToABCI(), + }, + ) + if err != nil { - logger.Error("failed executing block on proxy app", "height", 
block.Height, "err", err) + logger.Error("executing block", "err", err) return nil, err } + logger.Info("executed block", "height", block.Height) // the BlockExecutor condition is used for the final block replay process. if be != nil { - abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates - err = validateValidatorUpdates(abciValUpdates, s.ConsensusParams.Validator) + err = validateValidatorUpdates(finalizeBlockResponse.ValidatorUpdates, s.ConsensusParams.Validator) if err != nil { - logger.Error("err", err) + logger.Error("validating validator updates", "err", err) return nil, err } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(finalizeBlockResponse.ValidatorUpdates) if err != nil { - logger.Error("err", err) + logger.Error("converting validator updates to native types", "err", err) return nil, err } @@ -650,11 +658,11 @@ func ExecCommitBlock( } blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - fireEvents(ctx, be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates) + fireEvents(ctx, be.logger, be.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates) } // Commit block, get hash back - res, err := appConnConsensus.Commit(ctx) + res, err := appConn.Commit(ctx) if err != nil { logger.Error("client error during proxyAppConn.Commit", "err", res) return nil, err diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 636e654e7..e62735a4a 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -18,7 +18,7 @@ import ( "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/eventbus" - mmock "github.com/tendermint/tendermint/internal/mempool/mock" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" @@ -27,7 +27,6 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -39,24 +38,35 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} - cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := proxyApp.Start(ctx) - require.NoError(t, err) + require.NoError(t, proxyApp.Start(ctx)) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) - blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), - mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - - block, err := sf.MakeBlock(state, 1, new(types.Commit)) - require.NoError(t, err) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, +
mock.Anything).Return(nil) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, eventBus) + + block := sf.MakeBlock(state, 1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} @@ -68,85 +78,91 @@ func TestApplyBlock(t *testing.T) { assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") } -// TestBeginBlockValidators ensures we send absent validators list. -func TestBeginBlockValidators(t *testing.T) { +// TestFinalizeBlockDecidedLastCommit ensures we correctly send the DecidedLastCommit to the +// application. The test ensures that the DecidedLastCommit properly reflects +// which validators signed the preceding block. +func TestFinalizeBlockDecidedLastCommit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.TestingLogger() app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, app) + appClient := proxy.New(cc, logger, proxy.NopMetrics()) - err := proxyApp.Start(ctx) + err := appClient.Start(ctx) require.NoError(t, err) - state, stateDB, _ := makeState(t, 2, 2) + state, stateDB, privVals := makeState(t, 7, 1) stateStore := sm.NewStore(stateDB) - - prevHash := state.LastBlockID.Hash - prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{Hash: prevHash, PartSetHeader: prevParts} - - var ( - now = tmtime.Now() - commitSig0 = types.NewCommitSigForBlock( - []byte("Signature1"), - state.Validators.Validators[0].Address, - now, - types.VoteExtensionToSign{}, - ) - commitSig1 = types.NewCommitSigForBlock( - []byte("Signature2"), - state.Validators.Validators[1].Address, - now, - types.VoteExtensionToSign{}, - ) - absentSig = types.NewCommitSigAbsent() - ) + absentSig := types.NewCommitSigAbsent() testCases := []struct { - desc string - lastCommitSigs []types.CommitSig - expectedAbsentValidators []int + name string + absentCommitSigs map[int]bool }{ - {"none absent", []types.CommitSig{commitSig0, commitSig1}, []int{}}, - {"one absent", []types.CommitSig{commitSig0, absentSig}, []int{1}}, - {"multiple absent", []types.CommitSig{absentSig, absentSig}, []int{0, 1}}, + {"none absent", map[int]bool{}}, + {"one absent", map[int]bool{1: true}}, + {"multiple absent", map[int]bool{1: true, 3: true}}, } for _, tc := range testCases { - lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) - - // block for height 2 - block, err := sf.MakeBlock(state, 2, lastCommit) - require.NoError(t, err) - - _, err = sm.ExecCommitBlock(ctx, nil, proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, state) - require.NoError(t, err, tc.desc) - - // -> app receives a list of validators with a bool indicating if they signed - ctr := 0 - for i, v := range app.CommitVotes { - if ctr < len(tc.expectedAbsentValidators) && - tc.expectedAbsentValidators[ctr] == i { + t.Run(tc.name, func(t *testing.T) { + blockStore := store.NewBlockStore(dbm.NewMemDB()) + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, 0) + evpool.On("Update", ctx, mock.Anything, mock.Anything).Return() + evpool.On("CheckEvidence", ctx, mock.Anything).Return(nil) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + 
mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mp, evpool, blockStore, eventBus) + state, _, lastCommit := makeAndCommitGoodBlock(ctx, t, state, 1, new(types.Commit), state.NextValidators.Validators[0].Address, blockExec, privVals, nil) + + for idx, isAbsent := range tc.absentCommitSigs { + if isAbsent { + lastCommit.Signatures[idx] = absentSig + } + } - assert.False(t, v.SignedLastBlock) - ctr++ - } else { - assert.True(t, v.SignedLastBlock) + // block for height 2 + block := sf.MakeBlock(state, 2, lastCommit) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + _, err = blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) + + // -> app receives a list of validators with a bool indicating if they signed + for i, v := range app.CommitVotes { + _, absent := tc.absentCommitSigs[i] + assert.Equal(t, !absent, v.SignedLastBlock) } - } + }) } } -// TestBeginBlockByzantineValidators ensures we send byzantine validators list. -func TestBeginBlockByzantineValidators(t *testing.T) { +// TestFinalizeBlockByzantineValidators ensures we send byzantine validators list. +func TestFinalizeBlockByzantineValidators(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + logger := log.TestingLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) @@ -219,14 +235,27 @@ func TestBeginBlockByzantineValidators(t *testing.T) { evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return(ev, int64(100)) evpool.On("Update", ctx, mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool.On("CheckEvidence", ctx, mock.AnythingOfType("types.EvidenceList")).Return(nil) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockStore := store.NewBlockStore(dbm.NewMemDB()) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mmock.Mempool{}, evpool, blockStore) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp, + mp, evpool, blockStore, eventBus) - block, err := sf.MakeBlock(state, 1, new(types.Commit)) - require.NoError(t, err) + block := sf.MakeBlock(state, 1, new(types.Commit)) block.Evidence = ev block.Header.EvidenceHash = block.Evidence.Hash() bps, err := block.MakePartSet(testPartSize) @@ -248,9 +277,9 @@ func TestProcessProposal(t *testing.T) { defer cancel() app := abcimocks.NewBaseMock() - cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) @@ -258,17 +287,20 @@ func 
TestProcessProposal(t *testing.T) { stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), - mmock.Mempool{}, + proxyApp, + new(mpmocks.Mempool), sm.EmptyEvidencePool{}, blockStore, + eventBus, ) - block0, err := sf.MakeBlock(state, height-1, new(types.Commit)) - require.NoError(t, err) + block0 := sf.MakeBlock(state, height-1, new(types.Commit)) lastCommitSig := []types.CommitSig{} partSet, err := block0.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) @@ -292,8 +324,7 @@ func TestProcessProposal(t *testing.T) { } lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, lastCommitSig) - block1, err := sf.MakeBlock(state, height, lastCommit) - require.NoError(t, err) + block1 := sf.MakeBlock(state, height, lastCommit) block1.Txs = txs expectedRpp := abci.RequestProcessProposal{ @@ -301,7 +332,7 @@ func TestProcessProposal(t *testing.T) { Header: *block1.Header.ToProto(), Txs: block1.Txs.ToSliceOfBytes(), ByzantineValidators: block1.Evidence.ToABCI(), - LastCommitInfo: abci.LastCommitInfo{ + ProposedLastCommit: abci.CommitInfo{ Round: 0, Votes: voteInfos, }, @@ -445,46 +476,54 @@ func TestUpdateValidators(t *testing.T) { } } -// TestEndBlockValidatorUpdates ensures we update validator set and send an event. -func TestEndBlockValidatorUpdates(t *testing.T) { +// TestFinalizeBlockValidatorUpdates ensures we update validator set and send an event. +func TestFinalizeBlockValidatorUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() app := &testApp{} - cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), - mmock.Mempool{}, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, + eventBus, ) - eventBus := eventbus.NewDefault(logger) - err = eventBus.Start(ctx) - require.NoError(t, err) - defer eventBus.Stop() - - blockExec.SetEventBus(eventBus) - updatesSub, err := eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ - ClientID: "TestEndBlockValidatorUpdates", + ClientID: "TestFinalizeBlockValidatorUpdates", Query: types.EventQueryValidatorSetUpdates, }) require.NoError(t, err) - block, err := sf.MakeBlock(state, 1, new(types.Commit)) - require.NoError(t, err) + block := sf.MakeBlock(state, 1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} @@ -519,33 +558,36 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } } -// TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing 
validator updates that +// TestFinalizeBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that // would result in empty set causes no panic, an error is raised and NextValidators is not updated -func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { +func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() app := &testApp{} - cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err := proxyApp.Start(ctx) require.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), - proxyApp.Consensus(), - mmock.Mempool{}, + proxyApp, + new(mpmocks.Mempool), sm.EmptyEvidencePool{}, blockStore, + eventBus, ) - block, err := sf.MakeBlock(state, 1, new(types.Commit)) - require.NoError(t, err) + block := sf.MakeBlock(state, 1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} @@ -562,6 +604,292 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { assert.NotEmpty(t, state.NextValidators.Validators) } +func TestEmptyPrepareProposal(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + app := abcimocks.NewBaseMock() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + sm.EmptyEvidencePool{}, + nil, + eventBus, + ) + pa, _ := state.Validators.GetByIndex(0) + commit := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + _, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa, nil) + require.NoError(t, err) +} + +// TestPrepareProposalRemoveTxs tests that any transactions marked as REMOVED +// are not included in the block produced by CreateProposalBlock. The test also +// ensures that any transactions removed are also removed from the mempool. 
+func TestPrepareProposalRemoveTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeTenTxs(height) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + trs[0].Action = abci.TxRecord_REMOVED + trs[1].Action = abci.TxRecord_REMOVED + mp.On("RemoveTxByKey", mock.Anything).Return(nil).Twice() + + app := abcimocks.NewBaseMock() + app.On("PrepareProposal", mock.Anything).Return(abci.ResponsePrepareProposal{ + ModifiedTx: true, + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + ) + pa, _ := state.Validators.GetByIndex(0) + commit := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, nil) + require.NoError(t, err) + require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) + + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[0].Tx))) + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[1].Tx))) + + mp.AssertCalled(t, "RemoveTxByKey", types.Tx(trs[0].Tx).Key()) + mp.AssertCalled(t, "RemoveTxByKey", types.Tx(trs[1].Tx).Key()) + mp.AssertExpectations(t) +} + +// TestPrepareProposalAddedTxsIncluded tests that any transactions marked as ADDED +// in the prepare proposal response are included in the block. The test also +// ensures that any transactions added are also checked into the mempool. 
+func TestPrepareProposalAddedTxsIncluded(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeTenTxs(height) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs[2:])) + mp.On("CheckTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + + trs := txsToTxRecords(types.Txs(txs)) + trs[0].Action = abci.TxRecord_ADDED + trs[1].Action = abci.TxRecord_ADDED + + app := abcimocks.NewBaseMock() + app.On("PrepareProposal", mock.Anything).Return(abci.ResponsePrepareProposal{ + ModifiedTx: true, + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + ) + pa, _ := state.Validators.GetByIndex(0) + commit := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, nil) + require.NoError(t, err) + + require.Equal(t, txs[0], block.Data.Txs[0]) + require.Equal(t, txs[1], block.Data.Txs[1]) + + mp.AssertExpectations(t) + mp.AssertCalled(t, "CheckTx", mock.Anything, types.Tx(trs[0].Tx), mock.Anything, mock.Anything) + mp.AssertCalled(t, "CheckTx", mock.Anything, types.Tx(trs[1].Tx), mock.Anything, mock.Anything) +} + +// TestPrepareProposalReorderTxs tests that CreateBlock produces a block with transactions +// in the order matching the order they are returned from PrepareProposal. +func TestPrepareProposalReorderTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeTenTxs(height) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + trs = trs[2:] + trs = append(trs[len(trs)/2:], trs[:len(trs)/2]...) 
+ + app := abcimocks.NewBaseMock() + app.On("PrepareProposal", mock.Anything).Return(abci.ResponsePrepareProposal{ + ModifiedTx: true, + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + ) + pa, _ := state.Validators.GetByIndex(0) + commit := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, nil) + require.NoError(t, err) + for i, tx := range block.Data.Txs { + require.Equal(t, types.Tx(trs[i].Tx), tx) + } + + mp.AssertExpectations(t) + +} + +// TestPrepareProposalModifiedTxFalse tests that CreateBlock correctly ignores +// the ResponsePrepareProposal TxRecords if ResponsePrepareProposal does not +// set ModifiedTx to true. +func TestPrepareProposalModifiedTxFalse(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeTenTxs(height) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + trs = append(trs[len(trs)/2:], trs[:len(trs)/2]...) + trs = trs[1:] + trs[0].Action = abci.TxRecord_REMOVED + trs[1] = &abci.TxRecord{ + Tx: []byte("new"), + Action: abci.TxRecord_ADDED, + } + + app := abcimocks.NewBaseMock() + app.On("PrepareProposal", mock.Anything).Return(abci.ResponsePrepareProposal{ + ModifiedTx: false, + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + ) + pa, _ := state.Validators.GetByIndex(0) + commit := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, nil) + require.NoError(t, err) + for i, tx := range block.Data.Txs { + require.Equal(t, txs[i], tx) + } + + mp.AssertExpectations(t) +} + func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { var ( h = make([]byte, tmhash.Size) @@ -577,3 +905,14 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc }, } } + +func txsToTxRecords(txs []types.Tx) []*abci.TxRecord { + trs := make([]*abci.TxRecord, len(txs)) + for i, tx := range txs { + trs[i] = &abci.TxRecord{ + Action: abci.TxRecord_UNMODIFIED, + Tx: tx, + } + } + return trs +} diff --git a/internal/state/export_test.go b/internal/state/export_test.go index 90e7e32a7..5f4110865 100644 --- a/internal/state/export_test.go +++ b/internal/state/export_test.go @@ -2,33 +2,9 @@ package state import ( abci "github.com/tendermint/tendermint/abci/types" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) -// -// TODO: Remove dependence on all entities exported from this file. 
-// -// Every entity exported here is dependent on a private entity from the `state` -// package. Currently, these functions are only made available to tests in the -// `state_test` package, but we should not be relying on them for our testing. -// Instead, we should be exclusively relying on exported entities for our -// testing, and should be refactoring exported entities to make them more -// easily testable from outside of the package. -// - -// UpdateState is an alias for updateState exported from execution.go, -// exclusively and explicitly for testing. -func UpdateState( - state State, - blockID types.BlockID, - header *types.Header, - abciResponses *tmstate.ABCIResponses, - validatorUpdates []*types.Validator, -) (State, error) { - return updateState(state, blockID, header, abciResponses, validatorUpdates) -} - // ValidateValidatorUpdates is an alias for validateValidatorUpdates exported // from execution.go, exclusively and explicitly for testing. func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index a5720f183..dffb6f256 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -11,17 +11,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" - "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -33,12 +29,6 @@ type paramsChangeTestCase struct { params types.ConsensusParams } -func newTestApp() proxy.AppConns { - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - return proxy.NewAppConns(cc, log.NewNopLogger(), proxy.NopMetrics()) -} - func makeAndCommitGoodBlock( ctx context.Context, t *testing.T, @@ -72,12 +62,13 @@ func makeAndApplyGoodBlock( evidence []types.Evidence, ) (sm.State, types.BlockID) { t.Helper() - block, _, err := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) require.NoError(t, blockExec.ValidateBlock(ctx, state, block)) blockID := types.BlockID{Hash: block.Hash(), - PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} + PartSetHeader: partSet.Header()} state, err = blockExec.ApplyBlock(ctx, state, blockID, block) require.NoError(t, err) @@ -153,11 +144,8 @@ func makeHeaderPartsResponsesValPubKeyChange( pubkey crypto.PubKey, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { - block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil}, - } + block := 
sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + abciResponses := &tmstate.ABCIResponses{} // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { @@ -184,13 +172,11 @@ func makeHeaderPartsResponsesValPowerChange( ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { t.Helper() - block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil}, - } + abciResponses := &tmstate.ABCIResponses{} + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { @@ -214,8 +200,7 @@ func makeHeaderPartsResponsesParams( ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { t.Helper() - block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams}, @@ -296,15 +281,15 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { } func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { - app.CommitVotes = req.LastCommitInfo.Votes + app.CommitVotes = req.DecidedLastCommit.Votes app.ByzantineValidators = req.ByzantineValidators - resTxs := make([]*abci.ResponseDeliverTx, len(req.Txs)) + resTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { if len(tx) > 0 { - resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } else { - resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK + 10} // error + resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK + 10} // error } } @@ -315,8 +300,8 @@ func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFi AppVersion: 1, }, }, - Events: []abci.Event{}, - Txs: resTxs, + Events: []abci.Event{}, + TxResults: resTxs, } } diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index f26eb30bb..5356b4c07 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -20,7 +20,7 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock +// BlockerIndexer implements a block indexer, indexing FinalizeBlock // events with an underlying KV store. Block events are indexed by their height, // such that matching search criteria returns the respective block height(s). type BlockerIndexer struct { @@ -44,12 +44,11 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return idx.store.Has(key) } -// Index indexes BeginBlock and EndBlock events for a given block by its height. +// Index indexes FinalizeBlock events for a given block by its height. 
// The following is indexed: // // primary key: encode(block.height | height) => encode(height) -// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) -// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) +// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block) => encode(height) func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Close() @@ -65,19 +64,19 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { return err } - // 2. index BeginBlock events - if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil { + // 2. index FinalizeBlock events + if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, types.EventTypeFinalizeBlock, height); err != nil { return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } return batch.WriteSync() } -// Search performs a query for block heights that match a given BeginBlock -// and Endblock event search criteria. The given query can match against zero, -// one or more block heights. In the case of height queries, i.e. block.height=H, -// if the height is indexed, that height alone will be returned. An error and -// nil slice is returned. Otherwise, a non-nil slice and nil error is returned. +// Search performs a query for block heights that match a given FinalizeBlock +// event search criteria. The given query can match against zero or more block +// heights. In the case of height queries, i.e. block.height=H, if the height is +// indexed, that height alone will be returned. On failure, an error and a nil +// slice are returned; otherwise, a non-nil slice and a nil error are returned. func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go index eabe981a3..0bca43848 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -92,19 +92,19 @@ func TestBlockIndexer(t *testing.T) { q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, - "begin_event.key1 = 'value1'": { + "finalize_event.key1 = 'value1'": { q: query.MustCompile(`finalize_event1.key1 = 'value1'`), results: []int64{}, }, - "begin_event.proposer = 'FCAA001'": { + "finalize_event.proposer = 'FCAA001'": { q: query.MustCompile(`finalize_event1.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, - "end_event.foo <= 5": { + "finalize_event.foo <= 5": { q: query.MustCompile(`finalize_event2.foo <= 5`), results: []int64{2, 4}, }, - "end_event.foo >= 100": { + "finalize_event.foo >= 100": { q: query.MustCompile(`finalize_event2.foo >= 100`), results: []int64{1}, }, @@ -112,11 +112,11 @@ func TestBlockIndexer(t *testing.T) { q: query.MustCompile(`block.height > 2 AND finalize_event2.foo <= 8`), results: []int64{4, 6, 8}, }, - "begin_event.proposer CONTAINS 'FFFFFFF'": { + "finalize_event.proposer CONTAINS 'FFFFFFF'": { q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, - "begin_event.proposer CONTAINS 'FCAA001'": { + "finalize_event.proposer CONTAINS 'FCAA001'": { q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, diff --git a/internal/state/indexer/indexer.go b/internal/state/indexer/indexer.go index
a1b78a257..7ff6733db 100644 --- a/internal/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -30,11 +30,11 @@ type BlockIndexer interface { // upon database query failure. Has(height int64) (bool, error) - // Index indexes BeginBlock and EndBlock events for a given block by its height. + // Index indexes FinalizeBlock events for a given block by its height. Index(types.EventDataNewBlockHeader) error - // Search performs a query for block heights that match a given BeginBlock - // and Endblock event search criteria. + // Search performs a query for block heights that match a given FinalizeBlock + // event search criteria. Search(ctx context.Context, q *query.Query) ([]int64, error) } diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d640d4b23..f6261c519 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -80,7 +80,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Height: 1, Index: uint32(0), Tx: types.Tx("foo"), - Result: abci.ResponseDeliverTx{Code: 0}, + Result: abci.ExecTxResult{Code: 0}, } err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult1}) require.NoError(t, err) @@ -88,7 +88,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Height: 1, Index: uint32(1), Tx: types.Tx("bar"), - Result: abci.ResponseDeliverTx{Code: 0}, + Result: abci.ExecTxResult{Code: 0}, } err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult2}) require.NoError(t, err) diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index b59d55856..d4b110f4a 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -338,7 +338,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 9ac541c72..72d14b5d8 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -46,8 +46,7 @@ const ( dbName = "postgres" chainID = "test-chainID" - viewBlockEvents = "block_events" - viewTxEvents = "tx_events" + viewTxEvents = "tx_events" ) func TestMain(m *testing.M) { @@ -266,7 +265,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: types.Tx("HELLO WORLD"), - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", @@ -309,25 +308,6 @@ SELECT height FROM `+tableBlocks+` WHERE height = $1; } else if err != nil { t.Fatalf("Database query failed: %v", err) } - - // Verify the presence of begin_block and end_block events. 
- if err := testDB().QueryRow(` -SELECT type, height, chain_id FROM `+viewBlockEvents+` - WHERE height = $1 AND type = $2 AND chain_id = $3; -`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows { - t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height) - } else if err != nil { - t.Fatalf("Database query failed: %c", err) - } - - if err := testDB().QueryRow(` -SELECT type, height, chain_id FROM `+viewBlockEvents+` - WHERE height = $1 AND type = $2 AND chain_id = $3; -`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows { - t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height) - } else if err != nil { - t.Fatalf("Database query failed: %v", err) - } } // verifyNotImplemented calls f and verifies that it returns both a diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index e36aed185..7007d5bb5 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -43,7 +43,7 @@ func BenchmarkTxSearch(b *testing.B) { Height: int64(i), Index: 0, Tx: types.Tx(string(txBz)), - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index 2caf9efc1..8004c0f27 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -25,7 +25,7 @@ func TestTxIndex(t *testing.T) { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Events: nil, }, @@ -48,7 +48,7 @@ func TestTxIndex(t *testing.T) { Height: 1, Index: 0, Tx: tx2, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Events: nil, }, @@ -322,7 +322,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", @@ -346,7 +346,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { Height: 1, Index: txIndex, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/state.go b/internal/state/state.go index 43cd78fb0..a31d8baad 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -91,7 +91,7 @@ type State struct { LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. - // Changes returned by EndBlock and updated after Commit. + // Changes returned by FinalizeBlock and updated after Commit. ConsensusParams types.ConsensusParams LastHeightConsensusParamsChanged int64 @@ -129,23 +129,30 @@ func (state State) Copy() State { } // Equals returns true if the States are identical. -func (state State) Equals(state2 State) bool { - sbz, s2bz := state.Bytes(), state2.Bytes() - return bytes.Equal(sbz, s2bz) +func (state State) Equals(state2 State) (bool, error) { + sbz, err := state.Bytes() + if err != nil { + return false, err + } + s2bz, err := state2.Bytes() + if err != nil { + return false, err + } + return bytes.Equal(sbz, s2bz), nil } -// Bytes serializes the State using protobuf. -// It panics if either casting to protobuf or serialization fails. 
-func (state State) Bytes() []byte { +// Bytes serializes the State using protobuf, propagating marshaling +// errors to the caller. +func (state State) Bytes() ([]byte, error) { sm, err := state.ToProto() if err != nil { - panic(err) + return nil, err } bz, err := proto.Marshal(sm) if err != nil { - panic(err) + return nil, err } - return bz + return bz, nil } // IsEmpty returns true if the State is equal to the empty State. @@ -260,7 +267,7 @@ func (state State) MakeBlock( commit *types.Commit, evidence []types.Evidence, proposerAddress []byte, -) (*types.Block, *types.PartSet, error) { +) *types.Block { // Build base block with block data. block := types.MakeBlock(height, txs, commit, evidence) @@ -274,12 +281,7 @@ func (state State) MakeBlock( proposerAddress, ) - bps, err := block.MakePartSet(types.BlockPartSizeBytes) - if err != nil { - return nil, nil, err - } - - return block, bps, nil + return block } //------------------------------------------------------------------------ diff --git a/internal/state/state_test.go b/internal/state/state_test.go index e66cde77a..f38d37ea4 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/merkle" sm "github.com/tendermint/tendermint/internal/state" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -54,13 +55,18 @@ func TestStateCopy(t *testing.T) { stateCopy := state.Copy() - assert.True(t, state.Equals(stateCopy), + seq, err := state.Equals(stateCopy) + require.NoError(t, err) + assert.True(t, seq, "expected state and its copy to be identical.\ngot: %v\nexpected: %v", stateCopy, state) stateCopy.LastBlockHeight++ stateCopy.LastValidators = state.Validators - assert.False(t, state.Equals(stateCopy), "expected states to be different. got same %v", state) + + seq, err = state.Equals(stateCopy) + require.NoError(t, err) + assert.False(t, seq, "expected states to be different. got same %v", state) } // TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. @@ -89,7 +95,9 @@ func TestStateSaveLoad(t *testing.T) { loadedState, err := stateStore.Load() require.NoError(t, err) - assert.True(t, state.Equals(loadedState), + seq, err := state.Equals(loadedState) + require.NoError(t, err) + assert.True(t, seq, "expected state and its copy to be identical.\ngot: %v\nexpected: %v", loadedState, state) } @@ -103,16 +111,15 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses.
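The State.Bytes and State.Equals rewrites above replace panics with returned errors, so every caller now has to propagate failures. A minimal sketch of the resulting call-site pattern, mirroring the dbStore.save change later in this diff (persistState is a hypothetical name):

package state_test // illustrative sketch only

import (
	"fmt"

	dbm "github.com/tendermint/tm-db"

	sm "github.com/tendermint/tendermint/internal/state"
)

// persistState shows the new error-propagating pattern: marshaling
// failures surface as wrapped errors instead of panics.
func persistState(batch dbm.Batch, key []byte, state sm.State) error {
	bz, err := state.Bytes()
	if err != nil {
		return fmt.Errorf("failed to marshal state: %w", err)
	}
	return batch.Set(key, bz)
}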
- block, err := statefactory.MakeBlock(state, 2, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) abciResponses := new(tmstate.ABCIResponses) - dtxs := make([]*abci.ResponseDeliverTx, 2) + dtxs := make([]*abci.ExecTxResult, 2) abciResponses.FinalizeBlock = new(abci.ResponseFinalizeBlock) - abciResponses.FinalizeBlock.Txs = dtxs + abciResponses.FinalizeBlock.TxResults = dtxs - abciResponses.FinalizeBlock.Txs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} - abciResponses.FinalizeBlock.Txs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} + abciResponses.FinalizeBlock.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} + abciResponses.FinalizeBlock.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) abciResponses.FinalizeBlock.ValidatorUpdates = []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}} @@ -136,23 +143,23 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { cases := [...]struct { // Height is implied to equal index+2, // as block 1 is created from genesis. - added []*abci.ResponseDeliverTx - expected []*abci.ResponseDeliverTx + added []*abci.ExecTxResult + expected []*abci.ExecTxResult }{ 0: { nil, nil, }, 1: { - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello")}, }, }, 2: { - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 383}, { Data: []byte("Gotcha!"), @@ -162,7 +169,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, }, }, - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 383, Data: nil}, {Code: 0, Data: []byte("Gotcha!"), Events: []abci.Event{ {Type: "type1", Attributes: []abci.EventAttribute{{Key: "a", Value: "1"}}}, @@ -175,7 +182,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { nil, }, 4: { - []*abci.ResponseDeliverTx{nil}, + []*abci.ExecTxResult{nil}, nil, }, } @@ -192,7 +199,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { h := int64(i + 1) // last block height, one below what we save responses := &tmstate.ABCIResponses{ FinalizeBlock: &abci.ResponseFinalizeBlock{ - Txs: tc.added, + TxResults: tc.added, }, } err := stateStore.SaveABCIResponses(h, responses) @@ -205,14 +212,13 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { res, err := stateStore.LoadABCIResponses(h) if assert.NoError(t, err, "%d", i) { t.Log(res) - responses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - Txs: tc.expected, - }, - } - sm.ABCIResponsesResultsHash(res) - sm.ABCIResponsesResultsHash(responses) - assert.Equal(t, sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) + e, err := abci.MarshalTxResults(tc.expected) + require.NoError(t, err) + he := merkle.HashFromByteSlices(e) + rs, err := abci.MarshalTxResults(res.FinalizeBlock.TxResults) + hrs := merkle.HashFromByteSlices(rs) + require.NoError(t, err) + assert.Equal(t, he, hrs, "%d", i) } } } @@ -278,9 +284,12 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) - state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) + rs, err := 
abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) - err := stateStore.Save(state) + err = stateStore.Save(state) require.NoError(t, err) } @@ -451,19 +460,19 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) + require.NoError(t, err) + rs, err := abci.MarshalTxResults(fb.TxResults) require.NoError(t, err) - updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + h := merkle.HashFromByteSlices(rs) + updatedState, err := state.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) curTotal := val1VotingPower // one increment step and one validator: 0 + power - total_power == 0 @@ -478,7 +487,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState2, err := updatedState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -517,7 +529,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // this will cause the diff of priorities (77) // to be larger than threshold == 2*totalVotingPower (22): - updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState3, err := updatedState2.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState3.NextValidators.Validators), 2) @@ -569,21 +584,21 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), 
PartSetHeader: bps.Header()} // no updates: - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := state.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) // 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10 @@ -600,7 +615,10 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState2, err := updatedState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -640,10 +658,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { updatedVal2, ) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState3, err := updatedState2.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address) @@ -679,15 +700,16 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 - abciResponses = &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb = &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + oldState, err = oldState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) expectedVal1Prio2 = 1 expectedVal2Prio2 = -1 @@ -696,15 +718,16 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err = 
types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := oldState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) assert.NoError(t, err) // alternate (and cyclic priorities): assert.NotEqual( @@ -755,21 +778,21 @@ func TestLargeGenesisValidator(t *testing.T) { oldState := state for i := 0; i < 10; i++ { // no updates: - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := oldState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) // no changes in voting power (ProposerPrio += VotingPower == Voting in 1st round; than shiftByAvg == 0, // than -Total == -Voting) @@ -791,41 +814,41 @@ func TestLargeGenesisValidator(t *testing.T) { firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}, } - block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := oldState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) lastState := updatedState for i := 0; i < 200; i++ { // no updates: - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err := 
types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - block, err := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) bps, err = block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedStateInner, err := lastState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) lastState = updatedStateInner } @@ -851,18 +874,18 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) assert.NoError(t, err) - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{addedVal}, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{addedVal}, } - block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) } require.Equal(t, 10+2, len(state.NextValidators.Validators)) @@ -871,22 +894,23 @@ func TestLargeGenesisValidator(t *testing.T) { gp, err := encoding.PubKeyToProto(genesisPubKey) require.NoError(t, err) removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} - abciResponses = &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}, - }, + fb = &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}, } - block, err = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) require.NoError(t, err) bps, err = block.MakePartSet(testPartSize) require.NoError(t, err) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) + require.NoError(t, err) + rs, err = abci.MarshalTxResults(fb.TxResults) require.NoError(t, err) - updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + h = merkle.HashFromByteSlices(rs) + updatedState, err = state.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) // only the first added val (not the genesis val) should be left assert.Equal(t, 11, len(updatedState.NextValidators.Validators)) @@ -897,21 +921,21 @@ func TestLargeGenesisValidator(t *testing.T) { count := 
0 isProposerUnchanged := true for isProposerUnchanged { - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb = &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) - require.NoError(t, err) - block, err = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) + block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + curState, err = curState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) { isProposerUnchanged = false @@ -927,23 +951,23 @@ func TestLargeGenesisValidator(t *testing.T) { proposers := make([]*types.Validator, numVals) for i := 0; i < 100; i++ { // no updates: - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - ValidatorUpdates: nil, - }, + fb := &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(fb.ValidatorUpdates) require.NoError(t, err) - block, err := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) - require.NoError(t, err) + block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err = updatedState.Update(blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) if i > numVals { // expect proposers to cycle through after the first iteration (of numVals blocks): if proposers[i%numVals] == nil { @@ -1002,7 +1026,10 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { var validatorUpdates []*types.Validator validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) - state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) + rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 err = stateStore.Save(state) @@ -1035,8 +1062,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block, err := statefactory.MakeBlock(state, 2, new(types.Commit)) - 
require.NoError(t, err) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) @@ -1080,10 +1106,13 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { header, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) - state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) + rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) - err := stateStore.Save(state) + err = stateStore.Save(state) require.NoError(t, err) } diff --git a/internal/state/store.go b/internal/state/store.go index c3e7b24a6..93bd3eb2b 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -170,7 +170,12 @@ func (store dbStore) save(state State, key []byte) error { return err } - if err := batch.Set(key, state.Bytes()); err != nil { + stateBz, err := state.Bytes() + if err != nil { + return err + } + + if err := batch.Set(key, stateBz); err != nil { return err } @@ -206,7 +211,12 @@ func (store dbStore) Bootstrap(state State) error { return err } - if err := batch.Set(stateKey, state.Bytes()); err != nil { + stateBz, err := state.Bytes() + if err != nil { + return err + } + + if err := batch.Set(stateKey, stateBz); err != nil { return err } @@ -396,14 +406,6 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b //------------------------------------------------------------------------ -// ABCIResponsesResultsHash returns the root hash of a Merkle tree of -// ResponseDeliverTx responses (see ABCIResults.Hash) -// -// See merkle.SimpleHashFromByteSlices -func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { - return types.NewResults(ar.FinalizeBlock.Txs).Hash() -} - // LoadABCIResponses loads the ABCIResponses for the given height from the // database. If not found, ErrNoABCIResponsesForHeight is returned. 
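ABCIResponsesResultsHash is deleted above, and the state tests now inline its replacement: marshal each ExecTxResult and take the Merkle root of the encodings before handing the hash to state.Update. A sketch of an equivalent local helper under the new API (resultsHash is a hypothetical name):

package state_test // illustrative sketch only

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

// resultsHash stands in for the deleted ABCIResponsesResultsHash:
// encode every ExecTxResult, then compute the Merkle root of the
// encoded slices, exactly as the updated tests do.
func resultsHash(txResults []*abci.ExecTxResult) ([]byte, error) {
	rs, err := abci.MarshalTxResults(txResults)
	if err != nil {
		return nil, err
	}
	return merkle.HashFromByteSlices(rs), nil
}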
// @@ -442,15 +444,15 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI } func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { - var dtxs []*abci.ResponseDeliverTx + var dtxs []*abci.ExecTxResult // strip nil values, - for _, tx := range abciResponses.FinalizeBlock.Txs { + for _, tx := range abciResponses.FinalizeBlock.TxResults { if tx != nil { dtxs = append(dtxs, tx) } } - abciResponses.FinalizeBlock.Txs = dtxs + abciResponses.FinalizeBlock.TxResults = dtxs bz, err := abciResponses.Marshal() if err != nil { diff --git a/internal/state/store_test.go b/internal/state/store_test.go index fd9c4bf5a..59084fd10 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -7,7 +7,6 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -239,7 +238,7 @@ func TestPruneStates(t *testing.T) { err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ FinalizeBlock: &abci.ResponseFinalizeBlock{ - Txs: []*abci.ResponseDeliverTx{ + TxResults: []*abci.ExecTxResult{ {Data: []byte{1}}, {Data: []byte{2}}, {Data: []byte{3}}, @@ -299,25 +298,3 @@ func TestPruneStates(t *testing.T) { }) } } - -func TestABCIResponsesResultsHash(t *testing.T) { - responses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - Txs: []*abci.ResponseDeliverTx{ - {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, - }, - }, - } - - root := sm.ABCIResponsesResultsHash(responses) - - // root should be Merkle tree root of FinalizeBlock tx responses - results := types.NewResults(responses.FinalizeBlock.Txs) - assert.Equal(t, root, results.Hash()) - - // test we can prove first tx in FinalizeBlock - proof := results.ProveResult(0) - bz, err := results[0].Marshal() - require.NoError(t, err) - assert.NoError(t, proof.Verify(root, bz)) -} diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index 5154d170a..14f49e2d5 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -42,19 +42,14 @@ func MakeBlocks(ctx context.Context, t *testing.T, n int, state *sm.State, privV return blocks } -func MakeBlock(state sm.State, height int64, c *types.Commit) (*types.Block, error) { - block, _, err := state.MakeBlock( +func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { + return state.MakeBlock( height, factory.MakeTenTxs(state.LastBlockHeight), c, nil, state.Validators.GetProposer().Address, ) - if err != nil { - return nil, err - } - - return block, nil } func makeBlockAndPartSet( @@ -82,7 +77,8 @@ func makeBlockAndPartSet( lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) } - block, partSet, err := state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) return block, partSet diff --git a/internal/state/tx_filter.go b/internal/state/tx_filter.go index 871e08ae6..11dd9ce67 100644 --- a/internal/state/tx_filter.go +++ b/internal/state/tx_filter.go @@ -1,22 +1,85 @@ package state import ( + "sync" + "time" + + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) -// TxPreCheck returns a function to filter transactions before 
processing. -// The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state State) mempool.PreCheckFunc { - maxDataBytes := types.MaxDataBytesNoEvidence( - state.ConsensusParams.Block.MaxBytes, - state.Validators.Size(), +func cachingStateFetcher(store Store) func() (State, error) { + const ttl = time.Second + + var ( + last time.Time + mutex = &sync.Mutex{} + cache State + err error ) - return mempool.PreCheckMaxBytes(maxDataBytes) + + return func() (State, error) { + mutex.Lock() + defer mutex.Unlock() + + if time.Since(last) < ttl && cache.ChainID != "" { + return cache, nil + } + + cache, err = store.Load() + if err != nil { + return State{}, err + } + last = time.Now() + + return cache, nil + } + } -// TxPostCheck returns a function to filter transactions after processing. +// TxPreCheckFromStore returns a function to filter transactions before processing. +// The function limits the size of a transaction to the block's maximum data size. +func TxPreCheckFromStore(store Store) mempool.PreCheckFunc { + fetch := cachingStateFetcher(store) + + return func(tx types.Tx) error { + state, err := fetch() + if err != nil { + return err + } + + return TxPreCheckForState(state)(tx) + } +} + +// TxPreCheckForState returns a mempool.PreCheckFunc bound to a fixed state. +func TxPreCheckForState(state State) mempool.PreCheckFunc { + return func(tx types.Tx) error { + maxDataBytes := types.MaxDataBytesNoEvidence( + state.ConsensusParams.Block.MaxBytes, + state.Validators.Size(), + ) + return mempool.PreCheckMaxBytes(maxDataBytes)(tx) + } +} + +// TxPostCheckFromStore returns a function to filter transactions after processing. // The function limits the gas wanted by a transaction to the block's maximum total gas. -func TxPostCheck(state State) mempool.PostCheckFunc { - return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) +func TxPostCheckFromStore(store Store) mempool.PostCheckFunc { + fetch := cachingStateFetcher(store) + + return func(tx types.Tx, resp *abci.ResponseCheckTx) error { + state, err := fetch() + if err != nil { + return err + } + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)(tx, resp) + } +} + +// TxPostCheckForState returns a mempool.PostCheckFunc bound to a fixed state. +func TxPostCheckForState(state State) mempool.PostCheckFunc { + return func(tx types.Tx, resp *abci.ResponseCheckTx) error { + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)(tx, resp) + } } diff --git a/internal/state/tx_filter_test.go b/internal/state/tx_filter_test.go index 27af28a40..ac85543b2 100644 --- a/internal/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -31,7 +31,7 @@ func TestTxFilter(t *testing.T) { state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - f := sm.TxPreCheck(state) + f := sm.TxPreCheckForState(state) if tc.isErr { assert.NotNil(t, f(tc.tx), "#%v", i) } else { diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 4d78fde74..62d2051cb 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -10,10 +10,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - memmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" + "github.com/tendermint/tendermint/internal/proxy" sm 
"github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" @@ -30,20 +33,36 @@ const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - proxyApp := newTestApp() + logger := log.TestingLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx)) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + state, stateDB, privVals := makeState(t, 3, 1) stateStore := sm.NewStore(stateDB) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - memmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, + eventBus, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) @@ -91,10 +110,9 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block, err := statefactory.MakeBlock(state, height, lastCommit) - require.NoError(t, err) + block := statefactory.MakeBlock(state, height, lastCommit) tc.malleateBlock(block) - err = blockExec.ValidateBlock(ctx, state, block) + err := blockExec.ValidateBlock(ctx, state, block) t.Logf("%s: %v", tc.name, err) require.Error(t, err, tc.name) } @@ -107,10 +125,9 @@ func TestValidateBlockHeader(t *testing.T) { } nextHeight := validationTestsStopHeight - block, err := statefactory.MakeBlock(state, nextHeight, lastCommit) - require.NoError(t, err) + block := statefactory.MakeBlock(state, nextHeight, lastCommit) state.InitialHeight = nextHeight + 1 - err = blockExec.ValidateBlock(ctx, state, block) + err := blockExec.ValidateBlock(ctx, state, block) require.Error(t, err, "expected an error when state is ahead of block") assert.Contains(t, err.Error(), "lower than initial height") } @@ -119,19 +136,36 @@ func TestValidateBlockCommit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - proxyApp := newTestApp() + logger := log.TestingLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx)) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + state, stateDB, privVals := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - memmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, + eventBus, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil) @@ -162,8 +196,7 @@ func TestValidateBlockCommit(t *testing.T) { state.LastBlockID, 
[]types.CommitSig{wrongHeightVote.CommitSig()}, ) - block, err := statefactory.MakeBlock(state, height, wrongHeightCommit) - require.NoError(t, err) + block := statefactory.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(ctx, state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) @@ -171,8 +204,7 @@ func TestValidateBlockCommit(t *testing.T) { /* #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() */ - block, err = statefactory.MakeBlock(state, height, wrongSigsCommit) - require.NoError(t, err) + block = statefactory.MakeBlock(state, height, wrongSigsCommit) err = blockExec.ValidateBlock(ctx, state, block) _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, @@ -245,7 +277,8 @@ func TestValidateBlockEvidence(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - proxyApp := newTestApp() + logger := log.TestingLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(t, 4, 1) @@ -259,14 +292,29 @@ func TestValidateBlockEvidence(t *testing.T) { evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return( []abci.Evidence{}) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + state.ConsensusParams.Evidence.MaxBytes = 1000 blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), - proxyApp.Consensus(), - memmock.Mempool{}, + proxyApp, + mp, evpool, blockStore, + eventBus, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) @@ -287,10 +335,9 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _, err := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) - require.NoError(t, err) + block := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) - err = blockExec.ValidateBlock(ctx, state, block) + err := blockExec.ValidateBlock(ctx, state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d but got %v", height, err) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 1f65a8c0c..51f626027 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -11,11 +11,11 @@ import ( "sync" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" @@ -135,8 +135,7 @@ type Reactor struct { stateStore sm.Store blockStore *store.BlockStore - 
conn proxy.AppConnSnapshot - connQuery proxy.AppConnQuery + conn abciclient.Client tempDir string snapshotCh *p2p.Channel chunkCh *p2p.Channel @@ -173,8 +172,7 @@ func NewReactor( initialHeight int64, cfg config.StateSyncConfig, logger log.Logger, - conn proxy.AppConnSnapshot, - connQuery proxy.AppConnQuery, + conn abciclient.Client, channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, stateStore sm.Store, @@ -209,7 +207,6 @@ func NewReactor( initialHeight: initialHeight, cfg: cfg, conn: conn, - connQuery: connQuery, snapshotCh: snapshotCh, chunkCh: chunkCh, blockCh: blockCh, @@ -287,7 +284,6 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.cfg, r.logger, r.conn, - r.connQuery, r.stateProvider, r.snapshotCh, r.chunkCh, diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index f259cfa58..c1ac1a048 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -13,11 +13,11 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + clientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" - proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/store" @@ -37,8 +37,7 @@ type reactorTestSuite struct { reactor *Reactor syncer *syncer - conn *proxymocks.AppConnSnapshot - connQuery *proxymocks.AppConnQuery + conn *clientmocks.Client stateProvider *mocks.StateProvider snapshotChannel *p2p.Channel @@ -71,21 +70,14 @@ type reactorTestSuite struct { func setup( ctx context.Context, t *testing.T, - conn *proxymocks.AppConnSnapshot, - connQuery *proxymocks.AppConnQuery, + conn *clientmocks.Client, stateProvider *mocks.StateProvider, chBuf uint, ) *reactorTestSuite { t.Helper() if conn == nil { - conn = &proxymocks.AppConnSnapshot{} - } - if connQuery == nil { - connQuery = &proxymocks.AppConnQuery{} - } - if stateProvider == nil { - stateProvider = &mocks.StateProvider{} + conn = &clientmocks.Client{} } rts := &reactorTestSuite{ @@ -102,7 +94,6 @@ func setup( paramsOutCh: make(chan p2p.Envelope, chBuf), paramsPeerErrCh: make(chan p2p.PeerError, chBuf), conn: conn, - connQuery: connQuery, stateProvider: stateProvider, } @@ -171,7 +162,6 @@ func setup( *cfg, logger.With("component", "reactor"), conn, - connQuery, chCreator, rts.peerUpdates, rts.stateStore, @@ -186,7 +176,6 @@ func setup( *cfg, logger.With("component", "syncer"), conn, - connQuery, stateProvider, rts.snapshotChannel, rts.chunkChannel, @@ -211,7 +200,7 @@ func TestReactor_Sync(t *testing.T) { defer cancel() const snapshotHeight = 7 - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) chain := buildLightBlockChain(ctx, t, 1, 10, time.Now()) // app accepts any snapshot rts.conn.On("OfferSnapshot", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). 
@@ -222,7 +211,7 @@ func TestReactor_Sync(t *testing.T) { Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) // app query returns valid state app hash - rts.connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ + rts.conn.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: testAppVersion, LastBlockHeight: snapshotHeight, LastBlockAppHash: chain[snapshotHeight+1].AppHash, @@ -237,8 +226,8 @@ func TestReactor_Sync(t *testing.T) { defer close(closeCh) go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go graduallyAddPeers(t, rts.peerUpdateCh, closeCh, 1*time.Second) - go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ + go graduallyAddPeers(ctx, t, rts.peerUpdateCh, closeCh, 1*time.Second) + go handleSnapshotRequests(ctx, t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ { Height: uint64(snapshotHeight), Format: 1, @@ -246,7 +235,7 @@ func TestReactor_Sync(t *testing.T) { }, }) - go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) + go handleChunkRequests(ctx, t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) @@ -265,7 +254,7 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -316,14 +305,14 @@ func TestReactor_ChunkRequest(t *testing.T) { defer cancel() // mock ABCI connection to return local snapshots - conn := &proxymocks.AppConnSnapshot{} + conn := &clientmocks.Client{} conn.On("LoadSnapshotChunk", mock.Anything, abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - rts := setup(ctx, t, conn, nil, nil, 2) + rts := setup(ctx, t, conn, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -343,7 +332,7 @@ func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -403,12 +392,12 @@ func TestReactor_SnapshotsRequest(t *testing.T) { defer cancel() // mock ABCI connection to return local snapshots - conn := &proxymocks.AppConnSnapshot{} + conn := &clientmocks.Client{} conn.On("ListSnapshots", mock.Anything, abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - rts := setup(ctx, t, conn, nil, nil, 100) + rts := setup(ctx, t, conn, nil, 100) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -435,7 +424,7 @@ func TestReactor_LightBlockResponse(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) var height int64 = 10 // generates a random header @@ -492,7 +481,7 @@ func TestReactor_BlockProviders(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: types.NodeID("aa"), Status: p2p.PeerStatusUp, @@ -559,7 +548,7 @@ 
func TestReactor_StateProviderP2P(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) // make syncer non nil else test won't think we are state syncing rts.reactor.syncer = rts.syncer peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) @@ -636,7 +625,7 @@ func TestReactor_Backfill(t *testing.T) { defer cancel() t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) - rts := setup(ctx, t, nil, nil, nil, 21) + rts := setup(ctx, t, nil, nil, 21) var ( startHeight int64 = 20 @@ -742,11 +731,15 @@ func handleLightBlockRequests( if requests%10 >= failureRate { lb, err := chain[int64(msg.Height)].ToProto() require.NoError(t, err) - sending <- p2p.Envelope{ + select { + case sending <- p2p.Envelope{ From: envelope.To, Message: &ssproto.LightBlockResponse{ LightBlock: lb, }, + }: + case <-ctx.Done(): + return } } else { switch errorCount % 3 { @@ -755,18 +748,26 @@ func handleLightBlockRequests( _, _, lb := mockLB(ctx, t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) differntLB, err := lb.ToProto() require.NoError(t, err) - sending <- p2p.Envelope{ + select { + case sending <- p2p.Envelope{ From: envelope.To, Message: &ssproto.LightBlockResponse{ LightBlock: differntLB, }, + }: + case <-ctx.Done(): + return } case 1: // send nil block i.e. pretend we don't have it - sending <- p2p.Envelope{ + select { + case sending <- p2p.Envelope{ From: envelope.To, Message: &ssproto.LightBlockResponse{ LightBlock: nil, }, + }: + case <-ctx.Done(): + return } case 2: // don't do anything } @@ -794,19 +795,23 @@ func handleConsensusParamsRequest( case <-ctx.Done(): return case envelope := <-receiving: - if ctx.Err() != nil { + msg, ok := envelope.Message.(*ssproto.ParamsRequest) + if !ok { + t.Errorf("message was %T which is not a params request", envelope.Message) return } - - t.Log("received consensus params request") - msg, ok := envelope.Message.(*ssproto.ParamsRequest) - require.True(t, ok) - sending <- p2p.Envelope{ + select { + case sending <- p2p.Envelope{ From: envelope.To, Message: &ssproto.ParamsResponse{ Height: msg.Height, ConsensusParams: paramsProto, }, + }: + case <-ctx.Done(): + return + case <-closeCh: + return } case <-closeCh: @@ -860,6 +865,7 @@ func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, las // graduallyAddPeers delivers a new randomly-generated peer update on peerUpdateCh once // per interval, until closeCh is closed. Each peer update is assigned a random node ID. 
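The hunks above all apply the same shutdown-aware send pattern: each bare channel send in the test helpers becomes a select that also watches ctx.Done() (and, where present, closeCh), so a helper goroutine can no longer block forever on a receiver that has already gone away. A minimal sketch of the pattern, with hypothetical names (trySend, out) and interface{} standing in for p2p.Envelope:

package statesynctest

import "context"

// trySend delivers v on out unless ctx is canceled or closeCh is closed
// first; it reports whether the send happened. This is the shape of the
// select blocks added to handleLightBlockRequests, handleSnapshotRequests,
// and handleChunkRequests in this patch.
func trySend(ctx context.Context, out chan<- interface{}, v interface{}, closeCh <-chan struct{}) bool {
	select {
	case out <- v:
		return true
	case <-ctx.Done():
		return false
	case <-closeCh:
		return false
	}
}

The same reasoning motivates the new ctx parameter threaded into graduallyAddPeers, handleSnapshotRequests, and handleChunkRequests below: their receive loops now check ctx.Done() and closeCh before doing any work.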
func graduallyAddPeers( + ctx context.Context, t *testing.T, peerUpdateCh chan p2p.PeerUpdate, closeCh chan struct{}, @@ -868,6 +874,10 @@ func graduallyAddPeers( ticker := time.NewTicker(interval) for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case <-ticker.C: peerUpdateCh <- p2p.PeerUpdate{ NodeID: factory.RandomNodeID(t), @@ -879,13 +889,12 @@ func graduallyAddPeers( ParamsChannel: struct{}{}, }, } - case <-closeCh: - return } } } func handleSnapshotRequests( + ctx context.Context, t *testing.T, receivingCh chan p2p.Envelope, sendingCh chan p2p.Envelope, @@ -895,6 +904,10 @@ func handleSnapshotRequests( t.Helper() for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case envelope := <-receivingCh: _, ok := envelope.Message.(*ssproto.SnapshotsRequest) require.True(t, ok) @@ -910,13 +923,12 @@ func handleSnapshotRequests( }, } } - case <-closeCh: - return } } } func handleChunkRequests( + ctx context.Context, t *testing.T, receivingCh chan p2p.Envelope, sendingCh chan p2p.Envelope, @@ -926,6 +938,10 @@ func handleChunkRequests( t.Helper() for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case envelope := <-receivingCh: msg, ok := envelope.Message.(*ssproto.ChunkRequest) require.True(t, ok) @@ -940,8 +956,6 @@ func handleChunkRequests( }, } - case <-closeCh: - return } } } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index e2e41586c..78eb8d53a 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -8,6 +8,7 @@ import ( "sync" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" @@ -54,8 +55,7 @@ var ( type syncer struct { logger log.Logger stateProvider StateProvider - conn proxy.AppConnSnapshot - connQuery proxy.AppConnQuery + conn abciclient.Client snapshots *snapshotPool snapshotCh *p2p.Channel chunkCh *p2p.Channel @@ -76,8 +76,7 @@ type syncer struct { func newSyncer( cfg config.StateSyncConfig, logger log.Logger, - conn proxy.AppConnSnapshot, - connQuery proxy.AppConnQuery, + conn abciclient.Client, stateProvider StateProvider, snapshotCh *p2p.Channel, chunkCh *p2p.Channel, @@ -88,7 +87,6 @@ func newSyncer( logger: logger, stateProvider: stateProvider, conn: conn, - connQuery: connQuery, snapshots: newSnapshotPool(), snapshotCh: snapshotCh, chunkCh: chunkCh, @@ -547,7 +545,7 @@ func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uin // verifyApp verifies the sync, checking the app hash, last block height and app version func (s *syncer) verifyApp(ctx context.Context, snapshot *snapshot, appVersion uint64) error { - resp, err := s.connQuery.Info(ctx, proxy.RequestInfo) + resp, err := s.conn.Info(ctx, proxy.RequestInfo) if err != nil { return fmt.Errorf("failed to query ABCI app for appHash: %w", err) } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index b199fc982..e3bf49259 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + clientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/proxy" - proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" sm "github.com/tendermint/tendermint/internal/state" 
"github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" @@ -62,13 +62,12 @@ func TestSyncer_SyncAny(t *testing.T) { stateProvider.On("AppHash", mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil) stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) - connSnapshot := &proxymocks.AppConnSnapshot{} - connQuery := &proxymocks.AppConnQuery{} + conn := &clientmocks.Client{} peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(ctx, t, connSnapshot, connQuery, stateProvider, 4) + rts := setup(ctx, t, conn, stateProvider, 4) rts.reactor.syncer = rts.syncer @@ -110,7 +109,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. - connSnapshot.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ + conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -119,7 +118,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ + conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -171,7 +170,7 @@ func TestSyncer_SyncAny(t *testing.T) { // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). 
- connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Run(func(args mock.Arguments) { time.Sleep(1 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ @@ -179,16 +178,16 @@ func TestSyncer_SyncAny(t *testing.T) { RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ + conn.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: testAppVersion, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), @@ -217,8 +216,7 @@ func TestSyncer_SyncAny(t *testing.T) { require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) - connSnapshot.AssertExpectations(t) - connQuery.AssertExpectations(t) + conn.AssertExpectations(t) } func TestSyncer_SyncAny_noSnapshots(t *testing.T) { @@ -228,7 +226,7 @@ func TestSyncer_SyncAny_noSnapshots(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) _, _, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) @@ -241,7 +239,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") @@ -265,7 +263,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -307,7 +305,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. 
s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -345,7 +343,7 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -394,7 +392,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -444,7 +442,7 @@ func TestSyncer_offerSnapshot(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ @@ -497,7 +495,7 @@ func TestSyncer_applyChunks_Results(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, t.TempDir()) @@ -557,7 +555,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, t.TempDir()) require.NoError(t, err) @@ -628,7 +626,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(ctx, t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. 
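The setup and mock changes in these tests all follow from collapsing proxymocks.AppConnSnapshot and proxymocks.AppConnQuery into a single abciclient.Client: setup loses one argument, and snapshot-side and query-side expectations now attach to one mock with a single AssertExpectations call. A sketch under that assumption, using only calls that appear in the hunks; the response values are illustrative:

package statesync_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	clientmocks "github.com/tendermint/tendermint/abci/client/mocks"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/proxy"
)

func exampleConsolidatedMock(t *testing.T) {
	conn := &clientmocks.Client{}
	// Snapshot-side expectation (previously set on AppConnSnapshot).
	conn.On("ListSnapshots", mock.Anything, abci.RequestListSnapshots{}).
		Return(&abci.ResponseListSnapshots{}, nil)
	// Query-side expectation (previously set on AppConnQuery).
	conn.On("Info", mock.Anything, proxy.RequestInfo).
		Return(&abci.ResponseInfo{LastBlockHeight: 1}, nil)
	// ... exercise the syncer against conn ...
	conn.AssertExpectations(t)
}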
@@ -761,9 +759,9 @@ func TestSyncer_verifyApp(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - rts := setup(ctx, t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) - rts.connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) + rts.conn.On("Info", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) err := rts.syncer.verifyApp(ctx, s, appVersion) unwrapped := errors.Unwrap(err) if unwrapped != nil { diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 8e1ee2db3..82949b103 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -86,11 +86,8 @@ func TestMain(m *testing.M) { stdlog.Fatal(err) } - block, err = factory.MakeBlock(state, 1, new(types.Commit)) + block = factory.MakeBlock(state, 1, new(types.Commit)) - if err != nil { - stdlog.Fatal(err) - } partSet, err = block.MakePartSet(2) if err != nil { stdlog.Fatal(err) @@ -121,8 +118,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // save a block - block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) - require.NoError(t, err) + block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) validPartSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestCommit(10, tmtime.Now()) @@ -326,8 +322,7 @@ func TestLoadBaseMeta(t *testing.T) { bs := NewBlockStore(dbm.NewMemDB()) for h := int64(1); h <= 10; h++ { - block, err := factory.MakeBlock(state, h, new(types.Commit)) - require.NoError(t, err) + block := factory.MakeBlock(state, h, new(types.Commit)) partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) @@ -394,8 +389,7 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { - block, err := factory.MakeBlock(state, h, new(types.Commit)) - require.NoError(t, err) + block := factory.MakeBlock(state, h, new(types.Commit)) partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) @@ -502,8 +496,7 @@ func TestBlockFetchAtHeight(t *testing.T) { defer cleanup() require.NoError(t, err) require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") - block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) - require.NoError(t, err) + block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) partSet, err := block.MakePartSet(2) require.NoError(t, err) @@ -545,8 +538,7 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // are persisted. for h := int64(3); h <= 5; h++ { blockCommit := makeTestCommit(h-1, tmtime.Now()) - block, err := factory.MakeBlock(state, h, blockCommit) - require.NoError(t, err) + block := factory.MakeBlock(state, h, blockCommit) partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) diff --git a/libs/log/default.go b/libs/log/default.go index ee729db23..706977659 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -60,17 +60,6 @@ func NewDefaultLogger(format, level string) (Logger, error) { }, nil } -// MustNewDefaultLogger delegates a call NewDefaultLogger where it panics on -// error. 
-func MustNewDefaultLogger(format, level string) Logger { - logger, err := NewDefaultLogger(format, level) - if err != nil { - panic(err) - } - - return logger -} - func (l defaultLogger) Info(msg string, keyVals ...interface{}) { l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg) } diff --git a/libs/service/service.go b/libs/service/service.go index f7701633a..6221c7d92 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -14,6 +14,11 @@ var ( errAlreadyStopped = errors.New("already stopped") ) +var ( + _ Service = (*BaseService)(nil) + _ Service = (*NopService)(nil) +) + // Service defines a service that can be started, stopped, and reset. type Service interface { // Start is called to start the service, which should run until @@ -85,6 +90,12 @@ type BaseService struct { impl Implementation } +type NopService struct{} + +func (NopService) Start(_ context.Context) error { return nil } +func (NopService) IsRunning() bool { return true } +func (NopService) Wait() {} + // NewBaseService creates a new BaseService. func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { return &BaseService{ diff --git a/light/detector.go b/light/detector.go index 1d7a066cb..a5ac35a02 100644 --- a/light/detector.go +++ b/light/detector.go @@ -39,8 +39,8 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader witnessesToRemove = make([]int, 0) ) - c.logger.Debug("running detector against trace", "endBlockHeight", lastVerifiedHeader.Height, - "endBlockHash", lastVerifiedHeader.Hash, "length", len(primaryTrace)) + c.logger.Debug("running detector against trace", "finalizeBlockHeight", lastVerifiedHeader.Height, + "finalizeBlockHash", lastVerifiedHeader.Hash, "length", len(primaryTrace)) // launch one goroutine per witness to retrieve the light block of the target height // and compare it with the header from the primary diff --git a/light/helpers_test.go b/light/helpers_test.go index 37b4b5bf3..73ef2ae65 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -35,35 +35,12 @@ func genPrivKeys(n int) privKeys { return res } -// // Change replaces the key at index i. -// func (pkz privKeys) Change(i int) privKeys { -// res := make(privKeys, len(pkz)) -// copy(res, pkz) -// res[i] = ed25519.GenPrivKey() -// return res -// } - // Extend adds n more keys (to remove, just take a slice). func (pkz privKeys) Extend(n int) privKeys { extra := genPrivKeys(n) return append(pkz, extra...) } -// // GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits. -// func GenSecpPrivKeys(n int) privKeys { -// res := make(privKeys, n) -// for i := range res { -// res[i] = secp256k1.GenPrivKey() -// } -// return res -// } - -// // ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -// func (pkz privKeys) ExtendSecp(n int) privKeys { -// extra := GenSecpPrivKeys(n) -// return append(pkz, extra...) -// } - // ToValidators produces a valset from the set of keys. 
// The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution diff --git a/light/rpc/client.go b/light/rpc/client.go index 001e1c7f6..f1c21c994 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -458,16 +458,17 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.Re return nil, err } - // Build a Merkle tree of proto-encoded FinalizeBlock tx results and get a hash. - results := types.NewResults(res.TxsResults) - - // Build a Merkle tree out of the above 3 binary slices. - rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash()}) + // Build a Merkle tree out of the slice. + rs, err := abci.MarshalTxResults(res.TxsResults) + if err != nil { + return nil, err + } + mh := merkle.HashFromByteSlices(append([][]byte{bbeBytes}, rs...)) // Verify block results. - if !bytes.Equal(rH, trustedBlock.LastResultsHash) { + if !bytes.Equal(mh, trustedBlock.LastResultsHash) { return nil, fmt.Errorf("last results %X does not match with trusted last results %X", - rH, trustedBlock.LastResultsHash) + mh, trustedBlock.LastResultsHash) } return res, nil diff --git a/node/node.go b/node/node.go index 7d7b75170..c2acfa7a8 100644 --- a/node/node.go +++ b/node/node.go @@ -58,12 +58,12 @@ type nodeImpl struct { router *p2p.Router nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey - isListening bool // services eventSinks []indexer.EventSink stateStore sm.Store - blockStore *store.BlockStore // store the blockchain to disk + blockStore *store.BlockStore // store the blockchain to disk + evPool *evidence.Pool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots @@ -101,7 +101,10 @@ func newDefaultNode( return nil, err } - appClient, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + appClient, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + if err != nil { + return nil, err + } return makeNode( ctx, @@ -121,7 +124,7 @@ func makeNode( cfg *config.Config, filePrivval *privval.FilePV, nodeKey types.NodeKey, - clientCreator abciclient.Creator, + client abciclient.Client, genesisDocProvider genesisDocProvider, dbProvider config.DBProvider, logger log.Logger, @@ -144,11 +147,8 @@ func makeNode( return nil, combineCloseError(err, makeCloser(closers)) } - err = genDoc.ValidateAndComplete() - if err != nil { - return nil, combineCloseError( - fmt.Errorf("error in genesis doc: %w", err), - makeCloser(closers)) + if err = genDoc.ValidateAndComplete(); err != nil { + return nil, combineCloseError(fmt.Errorf("error in genesis doc: %w", err), makeCloser(closers)) } state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) @@ -159,15 +159,15 @@ func makeNode( nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), nodeMetrics.proxy) + proxyApp := proxy.New(client, logger.With("module", "proxy"), nodeMetrics.proxy) if err := proxyApp.Start(ctx); err != nil { return nil, fmt.Errorf("error starting proxy app connections: %w", err) } // EventBus and IndexerService must be started before the handshake because // we might need to index the txs of the replayed block as this might not have happened - // when the node stopped last time (i.e. 
the node stopped after it saved the block - // but before it indexed the txs, or, endblocker panicked) + // when the node stopped last time (i.e. the node stopped or crashed after it saved the block + // but before it indexed the txs) eventBus := eventbus.NewDefault(logger.With("module", "events")) if err := eventBus.Start(ctx); err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -242,10 +242,6 @@ func makeNode( } } - // Determine whether we should do block sync. This must happen after the handshake, since the - // app may modify the validator set, specifying ourself as the only validator. - blockSync := !onlyValidatorIsUs(state, pubKey) - logNodeStartupInfo(state, pubKey, logger, cfg.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. @@ -272,15 +268,16 @@ func makeNode( } mpReactor, mp, err := createMempoolReactor(ctx, - cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, + cfg, proxyApp, stateStore, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } - evReactor, evPool, err := createEvidenceReactor(ctx, - cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, nodeMetrics.evidence, eventBus, + evReactor, evPool, edbCloser, err := createEvidenceReactor(ctx, + cfg, dbProvider, stateStore, blockStore, peerManager, router, logger, nodeMetrics.evidence, eventBus, ) + closers = append(closers, edbCloser) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } @@ -289,15 +286,20 @@ func makeNode( blockExec := sm.NewBlockExecutor( stateStore, logger.With("module", "state"), - proxyApp.Consensus(), + proxyApp, mp, evPool, blockStore, + eventBus, sm.BlockExecutorWithMetrics(nodeMetrics.state), ) + // Determine whether we should do block sync. This must happen after the handshake, since the + // app may modify the validator set, specifying ourself as the only validator. + blockSync := !onlyValidatorIsUs(state, pubKey) + csReactor, csState, err := createConsensusReactor(ctx, - cfg, state, blockExec, blockStore, mp, evPool, + cfg, stateStore, blockExec, blockStore, mp, evPool, privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, peerManager, router, logger, ) @@ -309,7 +311,7 @@ func makeNode( // doing a state sync first. 
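Taken together, the makeNode hunks replace the four-connection proxy.AppConns with one abciclient.Client wrapped once by proxy.New; that single handle then flows to the mempool, block executor, and statesync reactor where proxyApp.Mempool(), .Consensus(), .Query(), and .Snapshot() used to go. A condensed sketch of the new construction, assembled only from constructors that appear in this patch, and assuming (as the hunks imply) that proxy.New's result is usable as an abciclient.Client; the kvstore app and NopMetrics stand in for real configuration:

package example

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/libs/log"
)

// startProxy builds the single ABCI client handle the node now threads
// everywhere, in place of the old per-connection accessors.
func startProxy(ctx context.Context) (abciclient.Client, error) {
	logger := log.NewNopLogger()
	client := abciclient.NewLocalClient(logger, kvstore.NewApplication())
	proxyApp := proxy.New(client, logger, proxy.NopMetrics())
	if err := proxyApp.Start(ctx); err != nil {
		return nil, err
	}
	return proxyApp, nil
}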
bcReactor, err := blocksync.NewReactor(ctx, logger.With("module", "blockchain"), - state.Copy(), + stateStore, blockExec, blockStore, csReactor, @@ -343,8 +345,7 @@ func makeNode( genDoc.InitialHeight, *cfg.StateSync, logger.With("module", "statesync"), - proxyApp.Snapshot(), - proxyApp.Query(), + proxyApp, router.OpenChannel, peerManager.Subscribe(ctx), stateStore, @@ -357,7 +358,7 @@ func makeNode( return nil, combineCloseError(err, makeCloser(closers)) } - var pexReactor service.Service + var pexReactor service.Service = service.NopService{} if cfg.P2P.PexReactor { pexReactor, err = pex.NewReactor(ctx, logger, peerManager, router.OpenChannel, peerManager.Subscribe(ctx)) if err != nil { @@ -390,18 +391,18 @@ func makeNode( blockStore: blockStore, stateSyncReactor: stateSyncReactor, stateSync: stateSync, + evPool: evPool, shutdownOps: makeCloser(closers), rpcEnv: &rpccore.Environment{ - ProxyAppQuery: proxyApp.Query(), - ProxyAppMempool: proxyApp.Mempool(), - - StateStore: stateStore, - BlockStore: blockStore, + ProxyApp: proxyApp, EvidencePool: evPool, ConsensusState: csState, + StateStore: stateStore, + BlockStore: blockStore, + ConsensusReactor: csReactor, BlockSyncReactor: bcReactor, @@ -421,8 +422,6 @@ func makeNode( node.rpcEnv.PubKey = pubKey } - node.rpcEnv.P2PTransport = node - node.BaseService = *service.NewBaseService(logger, "Node", node) return node, nil @@ -467,6 +466,15 @@ func (n *nodeImpl) OnStart(ctx context.Context) error { } } + state, err := n.stateStore.Load() + if err != nil { + return err + } + if err := n.evPool.Start(state); err != nil { + return err + } + + n.rpcEnv.NodeInfo = n.nodeInfo // Start the RPC server before the P2P server // so we can eg. receive txs for the first block if n.config.RPC.ListenAddress != "" { @@ -485,7 +493,7 @@ func (n *nodeImpl) OnStart(ctx context.Context) error { if err := n.router.Start(ctx); err != nil { return err } - n.isListening = true + n.rpcEnv.IsListening = true for _, reactor := range n.services { if err := reactor.Start(ctx); err != nil { @@ -580,7 +588,7 @@ func (n *nodeImpl) OnStop() { n.stateSyncReactor.Wait() n.router.Wait() - n.isListening = false + n.rpcEnv.IsListening = false // finally stop the listeners / external services for _, l := range n.rpcListeners { @@ -669,21 +677,6 @@ func (n *nodeImpl) RPCEnvironment() *rpccore.Environment { //------------------------------------------------------------------------------ -func (n *nodeImpl) Listeners() []string { - return []string{ - fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress), - } -} - -func (n *nodeImpl) IsListening() bool { - return n.isListening -} - -// NodeInfo returns the Node's Info from the Switch. -func (n *nodeImpl) NodeInfo() types.NodeInfo { - return n.nodeInfo -} - // genesisDocProvider returns a GenesisDoc. // It allows the GenesisDoc to be pulled from sources other than the // filesystem, for instance from a distributed key-value store cluster. @@ -747,10 +740,7 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { // loadStateFromDBOrGenesisDocProvider attempts to load the state from the // database, or creates one using the given genesisDocProvider. On success this also // returns the genesis doc loaded through the given provider. -func loadStateFromDBOrGenesisDocProvider( - stateStore sm.Store, - genDoc *types.GenesisDoc, -) (sm.State, error) { +func loadStateFromDBOrGenesisDocProvider(stateStore sm.Store, genDoc *types.GenesisDoc) (sm.State, error) { // 1. 
Attempt to load state from the database state, err := stateStore.Load() @@ -764,19 +754,25 @@ if err != nil { return sm.State{}, err } + + // 3. save the genesis document to the state store so + it's fetchable by other callers. + if err := stateStore.Save(state); err != nil { + return sm.State{}, err + } } return state, nil } -func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions { +func getRouterConfig(conf *config.Config, appClient abciclient.Client) p2p.RouterOptions { opts := p2p.RouterOptions{ QueueType: conf.P2P.QueueType, } - if conf.FilterPeers && proxyApp != nil { + if conf.FilterPeers && appClient != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { - res, err := proxyApp.Query().Query(ctx, abci.RequestQuery{ + res, err := appClient.Query(ctx, abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", id), }) if err != nil { @@ -790,7 +786,7 @@ } opts.FilterPeerByIP = func(ctx context.Context, ip net.IP, port uint16) error { - res, err := proxyApp.Query().Query(ctx, abci.RequestQuery{ + res, err := appClient.Query(ctx, abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/addr/%s", net.JoinHostPort(ip.String(), strconv.Itoa(int(port)))), }) if err != nil { diff --git a/node/node_test.go b/node/node_test.go index 41cb1b6a9..fcd633d71 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -273,8 +273,8 @@ func TestCreateProposalBlock(t *testing.T) { logger := log.NewNopLogger() - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err = proxyApp.Start(ctx) require.NoError(t, err) @@ -291,15 +291,13 @@ func TestCreateProposalBlock(t *testing.T) { mp := mempool.NewTxMempool( logger.With("module", "mempool"), cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, + proxyApp, ) // Make EvidencePool evidenceDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - evidencePool, err := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics()) - require.NoError(t, err) + evidencePool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), nil) // fill the evidence pool with more evidence // than can fit in a block @@ -325,17 +323,20 @@ func TestCreateProposalBlock(t *testing.T) { assert.NoError(t, err) } + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), + proxyApp, mp, evidencePool, blockStore, + eventBus, ) commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) - block, _, err := blockExec.CreateProposalBlock( + block, err := blockExec.CreateProposalBlock( ctx, height, state, commit, @@ -372,8 +373,8 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { logger := log.NewNopLogger() - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err = proxyApp.Start(ctx) require.NoError(t, err) @@ -391,8 +392,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { mp := mempool.NewTxMempool( logger.With("module", "mempool"), cfg.Mempool, -
proxyApp.Mempool(), - state.LastBlockHeight, + proxyApp, ) // fill the mempool with one txs just below the maximum size @@ -401,17 +401,21 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), + proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, + eventBus, ) commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) - block, _, err := blockExec.CreateProposalBlock( + block, err := blockExec.CreateProposalBlock( ctx, height, state, commit, @@ -440,8 +444,8 @@ func TestMaxProposalBlockSize(t *testing.T) { logger := log.NewNopLogger() - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) err = proxyApp.Start(ctx) require.NoError(t, err) @@ -456,8 +460,7 @@ func TestMaxProposalBlockSize(t *testing.T) { mp := mempool.NewTxMempool( logger.With("module", "mempool"), cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, + proxyApp, ) // fill the mempool with one txs just below the maximum size @@ -473,13 +476,17 @@ func TestMaxProposalBlockSize(t *testing.T) { assert.NoError(t, err) } + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), + proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, + eventBus, ) blockID := types.BlockID{ @@ -490,10 +497,16 @@ func TestMaxProposalBlockSize(t *testing.T) { }, } + // save the updated validator set for use by the block executor. 
+ state.LastBlockHeight = math.MaxInt64 - 3 + state.LastHeightValidatorsChanged = math.MaxInt64 - 1 + state.NextValidators = state.Validators.Copy() + require.NoError(t, stateStore.Save(state)) + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) // change state in order to produce the largest accepted header state.LastBlockID = blockID - state.LastBlockHeight = math.MaxInt64 - 1 + state.LastBlockHeight = math.MaxInt64 - 2 state.LastBlockTime = timestamp state.LastResultsHash = tmhash.Sum([]byte("last_results_hash")) state.AppHash = tmhash.Sum([]byte("app_hash")) @@ -523,7 +536,7 @@ func TestMaxProposalBlockSize(t *testing.T) { commit.Signatures = append(commit.Signatures, cs) } - block, partSet, err := blockExec.CreateProposalBlock( + block, err := blockExec.CreateProposalBlock( ctx, math.MaxInt64, state, commit, @@ -531,6 +544,8 @@ func TestMaxProposalBlockSize(t *testing.T) { nil, ) require.NoError(t, err) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) // this ensures that the header is at max size block.Header.Time = timestamp diff --git a/node/public.go b/node/public.go index 0d6f1d93e..af3aece8e 100644 --- a/node/public.go +++ b/node/public.go @@ -35,7 +35,7 @@ func New( ctx context.Context, conf *config.Config, logger log.Logger, - cf abciclient.Creator, + cf abciclient.Client, gen *types.GenesisDoc, ) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile()) diff --git a/node/setup.go b/node/setup.go index e880cd5c4..48ffcb073 100644 --- a/node/setup.go +++ b/node/setup.go @@ -10,6 +10,7 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" @@ -20,7 +21,6 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink" @@ -171,8 +171,8 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { func createMempoolReactor( ctx context.Context, cfg *config.Config, - proxyApp proxy.AppConns, - state sm.State, + appClient abciclient.Client, + store sm.Store, memplMetrics *mempool.Metrics, peerManager *p2p.PeerManager, router *p2p.Router, @@ -183,11 +183,10 @@ func createMempoolReactor( mp := mempool.NewTxMempool( logger, cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, + appClient, mempool.WithMetrics(memplMetrics), - mempool.WithPreCheck(sm.TxPreCheck(state)), - mempool.WithPostCheck(sm.TxPostCheck(state)), + mempool.WithPreCheck(sm.TxPreCheckFromStore(store)), + mempool.WithPostCheck(sm.TxPostCheckFromStore(store)), ) reactor, err := mempool.NewReactor( @@ -214,27 +213,23 @@ func createEvidenceReactor( ctx context.Context, cfg *config.Config, dbProvider config.DBProvider, - stateDB dbm.DB, + store sm.Store, blockStore *store.BlockStore, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, metrics *evidence.Metrics, eventBus *eventbus.EventBus, -) (*evidence.Reactor, *evidence.Pool, error) { +) (*evidence.Reactor, *evidence.Pool, closer, error) { evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { - return 
nil, nil, fmt.Errorf("unable to initialize evidence db: %w", err) + return nil, nil, func() error { return nil }, fmt.Errorf("unable to initialize evidence db: %w", err) } + dbCloser := evidenceDB.Close logger = logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore, metrics) - if err != nil { - return nil, nil, fmt.Errorf("creating evidence pool: %w", err) - } - - evidencePool.SetEventBus(eventBus) + evidencePool := evidence.NewPool(logger, evidenceDB, store, blockStore, metrics, eventBus) evidenceReactor, err := evidence.NewReactor( ctx, @@ -244,16 +239,16 @@ func createEvidenceReactor( evidencePool, ) if err != nil { - return nil, nil, fmt.Errorf("creating evidence reactor: %w", err) + return nil, nil, dbCloser, fmt.Errorf("creating evidence reactor: %w", err) } - return evidenceReactor, evidencePool, nil + return evidenceReactor, evidencePool, dbCloser, nil } func createConsensusReactor( ctx context.Context, cfg *config.Config, - state sm.State, + store sm.Store, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mp mempool.Mempool, @@ -268,16 +263,20 @@ func createConsensusReactor( ) (*consensus.Reactor, *consensus.State, error) { logger = logger.With("module", "consensus") - consensusState := consensus.NewState(ctx, + consensusState, err := consensus.NewState(ctx, logger, cfg.Consensus, - state.Copy(), + store, blockExec, blockStore, mp, evidencePool, + eventBus, consensus.StateMetrics(csMetrics), ) + if err != nil { + return nil, nil, err + } if privValidator != nil && cfg.Mode == config.ModeValidator { consensusState.SetPrivValidator(ctx, privValidator) @@ -289,6 +288,7 @@ func createConsensusReactor( consensusState, router.OpenChannel, peerManager.Subscribe(ctx), + eventBus, waitSync, csMetrics, ) @@ -296,9 +296,6 @@ func createConsensusReactor( return nil, nil, err } - // Services which will be publishing and/or subscribing for messages (events) - // consensusReactor will set it on consensusState and blockExecutor. - reactor.SetEventBus(eventBus) return reactor, consensusState, nil } @@ -385,7 +382,7 @@ func createRouter( nodeKey types.NodeKey, peerManager *p2p.PeerManager, cfg *config.Config, - proxyApp proxy.AppConns, + appClient abciclient.Client, ) (*p2p.Router, error) { p2pLogger := logger.With("module", "p2p") @@ -416,7 +413,7 @@ func createRouter( peerManager, []p2p.Transport{transport}, []p2p.Endpoint{ep}, - getRouterConfig(cfg, proxyApp), + getRouterConfig(cfg, appClient), ) } diff --git a/proto/Dockerfile b/proto/Dockerfile deleted file mode 100644 index 92fff39e6..000000000 --- a/proto/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# This Dockerfile defines an image containing tools for linting, formatting, -# and compiling the Tendermint protos. -FROM golang:1.17-alpine - -# Install a commonly used set of programs for use with our protos. -# clang-extra-tools is included here because it provides clang-format, -# used to format the .proto files. 
-RUN apk add --no-cache build-base clang-extra-tools curl git - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGO_PROTOBUF_VERSION=1.3.2 - -# Retrieve the go protoc programs and copy them into the PATH -RUN go install github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} && \ - go install github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} && \ - go install github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \ - mv "$(go env GOPATH)"/bin/* /usr/local/bin/ - -# Copy the 'buf' program out of the buildbuf/buf container. -COPY --from=bufbuild/buf:latest /usr/local/bin/* /usr/local/bin/ diff --git a/proto/buf.lock b/proto/buf.lock new file mode 100644 index 000000000..8c415e1af --- /dev/null +++ b/proto/buf.lock @@ -0,0 +1,7 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: gogo + repository: protobuf + commit: 4df00b267f944190a229ce3695781e99 diff --git a/buf.yaml b/proto/buf.yaml similarity index 50% rename from buf.yaml rename to proto/buf.yaml index cc4aced57..816db10f7 100644 --- a/buf.yaml +++ b/proto/buf.yaml @@ -1,16 +1,11 @@ -version: v1beta1 - -build: - roots: - - proto - - third_party/proto +version: v1 +deps: + - buf.build/gogo/protobuf +breaking: + use: + - FILE lint: use: - BASIC - FILE_LOWER_SNAKE_CASE - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index f3521d24d..c65e13f3e 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -21,26 +21,26 @@ import "gogoproto/gogo.proto"; message Request { oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6 [deprecated = true]; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8 [deprecated = true]; - RequestEndBlock end_block = 9 [deprecated = true]; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; - RequestPrepareProposal prepare_proposal = 15; - RequestProcessProposal process_proposal = 16; - RequestFinalizeBlock finalize_block = 19; + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 4; + RequestQuery query = 5; + RequestBeginBlock begin_block = 6 [deprecated = true]; + RequestCheckTx check_tx = 7; + RequestDeliverTx deliver_tx = 8 [deprecated = true]; + RequestEndBlock end_block = 9 [deprecated = true]; + RequestCommit commit = 10; + RequestListSnapshots list_snapshots = 11; + RequestOfferSnapshot offer_snapshot = 12; + RequestLoadSnapshotChunk load_snapshot_chunk = 13; + RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestPrepareProposal prepare_proposal = 15; + RequestProcessProposal process_proposal = 16; + RequestExtendVote extend_vote = 17; + RequestVerifyVoteExtension verify_vote_extension = 18; + RequestFinalizeBlock finalize_block = 19; } - reserved 17; // Placeholder for RequestExtendVote in v0.37 - reserved 18; // Placeholder for RequestVerifyVoteExtension in v0.37 } message RequestEcho { @@ -75,7 +75,7 @@ message RequestQuery { message RequestBeginBlock { bytes hash = 1; tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + CommitInfo 
last_commit_info = 3 [(gogoproto.nullable) = false]; repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; } @@ -127,9 +127,9 @@ message RequestPrepareProposal { tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; // txs is an array of transactions that will be included in a block, // sent to the app for possible modifications. - repeated bytes txs = 3; - LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; + repeated bytes txs = 3; + ExtendedCommitInfo local_last_commit = 4 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; // the modified transactions cannot exceed this size. int64 max_tx_bytes = 6; } @@ -138,15 +138,29 @@ message RequestProcessProposal { bytes hash = 1; tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; repeated bytes txs = 3; - LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false]; + CommitInfo proposed_last_commit = 4 [(gogoproto.nullable) = false]; repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; } +// Extends a vote with application-side injection +message RequestExtendVote { + bytes hash = 1; + int64 height = 2; +} + +// Verify the vote extension +message RequestVerifyVoteExtension { + bytes hash = 1; + bytes validator_address = 2; + int64 height = 3; + bytes vote_extension = 4; +} + message RequestFinalizeBlock { bytes hash = 1; tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; repeated bytes txs = 3; - LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false]; + CommitInfo decided_last_commit = 4 [(gogoproto.nullable) = false]; repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; } @@ -155,27 +169,27 @@ message RequestFinalizeBlock { message Response { oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7 [deprecated = true]; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9 [deprecated = true]; - ResponseEndBlock end_block = 10 [deprecated = true]; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; - ResponsePrepareProposal prepare_proposal = 16; - ResponseProcessProposal process_proposal = 17; - ResponseFinalizeBlock finalize_block = 20; + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseInitChain init_chain = 5; + ResponseQuery query = 6; + ResponseBeginBlock begin_block = 7 [deprecated = true]; + ResponseCheckTx check_tx = 8; + ResponseDeliverTx deliver_tx = 9 [deprecated = true]; + ResponseEndBlock end_block = 10 [deprecated = true]; + ResponseCommit commit = 11; + ResponseListSnapshots list_snapshots = 12; + ResponseOfferSnapshot offer_snapshot = 13; + ResponseLoadSnapshotChunk load_snapshot_chunk = 14; + ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponsePrepareProposal prepare_proposal = 16; + ResponseProcessProposal process_proposal = 17; + ResponseExtendVote extend_vote = 18; + ResponseVerifyVoteExtension verify_vote_extension = 19; + ResponseFinalizeBlock finalize_block = 20; } - reserved 18; // Placeholder for 
ResponseExtendVote in v0.37 - reserved 19; // Placeholder for ResponseVerifyVoteExtension in v0.37 } // nondeterministic @@ -308,7 +322,6 @@ message ResponsePrepareProposal { repeated ExecTxResult tx_results = 4; repeated ValidatorUpdate validator_updates = 5; tendermint.types.ConsensusParams consensus_param_updates = 6; - reserved 7; // Placeholder for app_signed_updates in v0.37 } message ResponseProcessProposal { @@ -319,8 +332,16 @@ message ResponseProcessProposal { tendermint.types.ConsensusParams consensus_param_updates = 5; } +message ResponseExtendVote { + bytes vote_extension = 1; +} + +message ResponseVerifyVoteExtension { + bool accept = 1; +} + message ResponseFinalizeBlock { - repeated Event block_events = 1 + repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; repeated ExecTxResult tx_results = 2; repeated ValidatorUpdate validator_updates = 3; @@ -332,11 +353,16 @@ message ResponseFinalizeBlock { //---------------------------------------- // Misc. -message LastCommitInfo { +message CommitInfo { int32 round = 1; repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; } +message ExtendedCommitInfo { + int32 round = 1; + repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + // Event allows application developers to attach additional information to // ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. // Later, transactions may be queried using these events. @@ -362,7 +388,7 @@ message ExecTxResult { string info = 4; // nondeterministic int64 gas_wanted = 5; int64 gas_used = 6; - repeated Event tx_events = 7 + repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic string codespace = 8; } @@ -410,8 +436,23 @@ message ValidatorUpdate { message VoteInfo { Validator validator = 1 [(gogoproto.nullable) = false]; bool signed_last_block = 2; - reserved 3; // Placeholder for tendermint_signed_extension in v0.37 - reserved 4; // Placeholder for app_signed_extension in v0.37 +} + +// ExtendedVoteInfo +message ExtendedVoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; + bytes vote_extension = 3; +} + +// CanonicalVoteExtension +// TODO: move this to core Tendermint data structures +message CanonicalVoteExtension { + bytes extension = 1; + int64 height = 2; + int32 round = 3; + string chain_id = 4; + bytes address = 5; } enum EvidenceType { @@ -462,5 +503,7 @@ service ABCIApplication { rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); rpc PrepareProposal(RequestPrepareProposal) returns (ResponsePrepareProposal); rpc ProcessProposal(RequestProcessProposal) returns (ResponseProcessProposal); + rpc ExtendVote(RequestExtendVote) returns (ResponseExtendVote); + rpc VerifyVoteExtension(RequestVerifyVoteExtension) returns (ResponseVerifyVoteExtension); rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock); } diff --git a/proto/tendermint/abci/types.proto.intermediate b/proto/tendermint/abci/types.proto.intermediate new file mode 100644 index 000000000..d710ed06f --- /dev/null +++ b/proto/tendermint/abci/types.proto.intermediate @@ -0,0 +1,504 @@ +syntax = "proto3"; +package tendermint.abci; + +import "tendermint/crypto/proof.proto"; +import "tendermint/types/types.proto"; +import "tendermint/crypto/keys.proto"; +import "tendermint/types/params.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; + +// This 
file is a temporary workaround to enable development during the ABCI++ +// project. This file should be deleted and any references to it removed when +// the ongoing work on ABCI++ is completed. +// +// For the duration of ABCI++, this file should be able to build the `abci/types/types.pb.go` +// file. Any changes that update that file must come as a result of a change in +// this .proto file. +// For more information, see https://github.com/tendermint/tendermint/issues/8066 + +//---------------------------------------- +// Request types + +message Request { + oneof value { + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 4; + RequestQuery query = 5; + RequestBeginBlock begin_block = 6 [deprecated = true]; + RequestCheckTx check_tx = 7; + RequestDeliverTx deliver_tx = 8 [deprecated = true]; + RequestEndBlock end_block = 9 [deprecated = true]; + RequestCommit commit = 10; + RequestListSnapshots list_snapshots = 11; + RequestOfferSnapshot offer_snapshot = 12; + RequestLoadSnapshotChunk load_snapshot_chunk = 13; + RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestPrepareProposal prepare_proposal = 15; + RequestProcessProposal process_proposal = 16; + RequestExtendVote extend_vote = 17; + RequestVerifyVoteExtension verify_vote_extension = 18; + RequestFinalizeBlock finalize_block = 19; + } +} + +message RequestEcho { + string message = 1; +} + +message RequestFlush {} + +message RequestInfo { + string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; + string abci_version = 4; +} + +message RequestInitChain { + google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 2; + tendermint.types.ConsensusParams consensus_params = 3; + repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; + bytes app_state_bytes = 5; + int64 initial_height = 6; +} + +message RequestQuery { + bytes data = 1; + string path = 2; + int64 height = 3; + bool prove = 4; +} + +message RequestBeginBlock { + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + CommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; +} + +enum CheckTxType { + NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; + RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; +} + +message RequestCheckTx { + bytes tx = 1; + CheckTxType type = 2; +} + +message RequestDeliverTx { + bytes tx = 1; +} + +message RequestEndBlock { + int64 height = 1; +} + +message RequestCommit {} + +// lists available snapshots +message RequestListSnapshots {} + +// offers a snapshot to the application +message RequestOfferSnapshot { + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height +} + +// loads a snapshot chunk +message RequestLoadSnapshotChunk { + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; +} + +// Applies a snapshot chunk +message RequestApplySnapshotChunk { + uint32 index = 1; + bytes chunk = 2; + string sender = 3; +} + +// Extends a vote with application-side injection +message RequestExtendVote { + types.Vote vote = 1; +} + +// Verify the vote extension +message RequestVerifyVoteExtension { + types.Vote vote = 1; +} + +message RequestPrepareProposal { + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + // txs is an array of transactions that will be 
included in a block, + // sent to the app for possible modifications. + repeated bytes txs = 3; + ExtendedCommitInfo local_last_commit = 4 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; + // the modified transactions cannot exceed this size. + int64 max_tx_bytes = 6; +} + +message RequestProcessProposal { + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + repeated bytes txs = 3; + CommitInfo proposed_last_commit = 4 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; +} + +message RequestFinalizeBlock { + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + repeated bytes txs = 3; + CommitInfo decided_last_commit = 4 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; +} + +//---------------------------------------- +// Response types + +message Response { + oneof value { + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseInitChain init_chain = 5; + ResponseQuery query = 6; + ResponseBeginBlock begin_block = 7 [deprecated = true]; + ResponseCheckTx check_tx = 8; + ResponseDeliverTx deliver_tx = 9 [deprecated = true]; + ResponseEndBlock end_block = 10 [deprecated = true]; + ResponseCommit commit = 11; + ResponseListSnapshots list_snapshots = 12; + ResponseOfferSnapshot offer_snapshot = 13; + ResponseLoadSnapshotChunk load_snapshot_chunk = 14; + ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponsePrepareProposal prepare_proposal = 16; + ResponseProcessProposal process_proposal = 17; + ResponseExtendVote extend_vote = 18; + ResponseVerifyVoteExtension verify_vote_extension = 19; + ResponseFinalizeBlock finalize_block = 20; + } +} + +// nondeterministic +message ResponseException { + string error = 1; +} + +message ResponseEcho { + string message = 1; +} + +message ResponseFlush {} + +message ResponseInfo { + string data = 1; + + // this is the software version of the application. TODO: remove? + string version = 2; + uint64 app_version = 3; + + int64 last_block_height = 4; + bytes last_block_app_hash = 5; +} + +message ResponseInitChain { + tendermint.types.ConsensusParams consensus_params = 1; + repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; + bytes app_hash = 3; +} + +message ResponseQuery { + uint32 code = 1; + // bytes data = 2; // use "value" instead. + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 index = 5; + bytes key = 6; + bytes value = 7; + tendermint.crypto.ProofOps proof_ops = 8; + int64 height = 9; + string codespace = 10; +} + +message ResponseBeginBlock { + repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5; + int64 gas_used = 6; + repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + string codespace = 8; + string sender = 9; + int64 priority = 10; + + // mempool_error is set by Tendermint. + + // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
+ string mempool_error = 11; +} + +message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic + string codespace = 8; +} + +message ResponseEndBlock { + repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; + tendermint.types.ConsensusParams consensus_param_updates = 2; + repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +message ResponseCommit { + // reserve 1 + bytes data = 2; + int64 retain_height = 3; +} + +message ResponseListSnapshots { + repeated Snapshot snapshots = 1; +} + +message ResponseOfferSnapshot { + Result result = 1; + + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Snapshot accepted, apply chunks + ABORT = 2; // Abort all snapshot restoration + REJECT = 3; // Reject this specific snapshot, try others + REJECT_FORMAT = 4; // Reject all snapshots of this format, try others + REJECT_SENDER = 5; // Reject all snapshots from the sender(s), try others + } +} + +message ResponseLoadSnapshotChunk { + bytes chunk = 1; +} + +message ResponseApplySnapshotChunk { + Result result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply + repeated string reject_senders = 3; // Chunk senders to reject and ban + + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Chunk successfully accepted + ABORT = 2; // Abort all snapshot restoration + RETRY = 3; // Retry chunk (combine with refetch and reject) + RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) + REJECT_SNAPSHOT = 5; // Reject this snapshot, try others + } +} + +message ResponseExtendVote { + tendermint.types.VoteExtension vote_extension = 1; +} + +message ResponseVerifyVoteExtension { + Result result = 1; + + enum Result { + UNKNOWN = 0; // Unknown result, reject vote extension + ACCEPT = 1; // Vote extension verified, include the vote + SLASH = 2; // Vote extension verification aborted, continue but slash validator + REJECT = 3; // Vote extension invalidated + } +} + +message ResponsePrepareProposal { + bool modified_tx = 1; + repeated TxRecord tx_records = 2; + bytes app_hash = 3; + repeated ExecTxResult tx_results = 4; + repeated ValidatorUpdate validator_updates = 5; + tendermint.types.ConsensusParams consensus_param_updates = 6; +} + +message ResponseProcessProposal { + bool accept = 1; + bytes app_hash = 2; + repeated ExecTxResult tx_results = 3; + repeated ValidatorUpdate validator_updates = 4; + tendermint.types.ConsensusParams consensus_param_updates = 5; +} + +message ResponseFinalizeBlock { + repeated Event events = 1 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + repeated ExecTxResult tx_results = 2; + repeated ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false]; + tendermint.types.ConsensusParams consensus_param_updates = 4; + bytes app_hash = 5; + int64 retain_height = 6; +} + +//---------------------------------------- +// Misc. 
+ +message CommitInfo { + int32 round = 1; + repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +message ExtendedCommitInfo { + int32 round = 1; + repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. +message Event { + string type = 1; + repeated EventAttribute attributes = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes,omitempty"]; +} + +// EventAttribute is a single key-value pair, associated with an event. +message EventAttribute { + string key = 1; + string value = 2; + bool index = 3; // nondeterministic +} + +// ExecTxResult contains results of executing one individual transaction. +// +// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted +message ExecTxResult { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5; + int64 gas_used = 6; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic + string codespace = 8; +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. +message TxResult { + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ExecTxResult result = 4 [(gogoproto.nullable) = false]; +} + +message TxRecord { + TxAction action = 1; + bytes tx = 2; + + // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal + enum TxAction { + UNKNOWN = 0; // Unknown action + UNMODIFIED = 1; // The Application did not modify this transaction. + ADDED = 2; // The Application added this transaction. + REMOVED = 3; // The Application wants this transaction removed from the proposal and the mempool. + } +} + +//---------------------------------------- +// Blockchain Types + +// Validator +message Validator { + bytes address = 1; // The first 20 bytes of SHA256(public key) + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + int64 power = 3; // The voting power +} + +// ValidatorUpdate +message ValidatorUpdate { + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + int64 power = 2; +} + +// VoteInfo +message VoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; + reserved 3; // Placeholder for tendermint_signed_extension in v0.37 + reserved 4; // Placeholder for app_signed_extension in v0.37 +} + +message ExtendedVoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; + bytes vote_extension = 3; +} + +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} + +message Evidence { + EvidenceType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. 
+ // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} + +//---------------------------------------- +// State Sync Types + +message Snapshot { + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash, equal only if identical + bytes metadata = 5; // Arbitrary application metadata +} + +//---------------------------------------- +// Service Definition + +service ABCIApplication { + rpc Echo(RequestEcho) returns (ResponseEcho); + rpc Flush(RequestFlush) returns (ResponseFlush); + rpc Info(RequestInfo) returns (ResponseInfo); + rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); + rpc Query(RequestQuery) returns (ResponseQuery); + rpc Commit(RequestCommit) returns (ResponseCommit); + rpc InitChain(RequestInitChain) returns (ResponseInitChain); + rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); + rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); + rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); + rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); + rpc PrepareProposal(RequestPrepareProposal) returns (ResponsePrepareProposal); + rpc ProcessProposal(RequestProcessProposal) returns (ResponseProcessProposal); + rpc ExtendVote(RequestExtendVote) returns (ResponseExtendVote); + rpc VerifyVoteExtension(RequestVerifyVoteExtension) returns (ResponseVerifyVoteExtension); + rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock); +} diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 999a6db7f..4febfd145 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.blocksync; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; + import "tendermint/types/block.proto"; // BlockRequest requests a block for a specific height diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto index fd0e427d0..7abe0d74b 100644 --- a/proto/tendermint/consensus/types.proto +++ b/proto/tendermint/consensus/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.consensus; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; + import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/libs/bits/types.proto"; diff --git a/proto/tendermint/consensus/wal.proto b/proto/tendermint/consensus/wal.proto index 22531e0d0..44afa2c0c 100644 --- a/proto/tendermint/consensus/wal.proto +++ b/proto/tendermint/consensus/wal.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.consensus; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; + import "gogoproto/gogo.proto"; import "tendermint/consensus/types.proto"; import "tendermint/types/events.proto"; diff --git a/proto/tendermint/crypto/keys.proto b/proto/tendermint/crypto/keys.proto index faaaed6fc..d66f9fc0c 100644 --- a/proto/tendermint/crypto/keys.proto +++ b/proto/tendermint/crypto/keys.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.crypto; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + import "gogoproto/gogo.proto"; // PublicKey defines the keys available for use with Tendermint 
Validators diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto index bde5a4ff9..975df7685 100644 --- a/proto/tendermint/crypto/proof.proto +++ b/proto/tendermint/crypto/proof.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.crypto; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + import "gogoproto/gogo.proto"; message Proof { diff --git a/proto/tendermint/libs/bits/types.proto b/proto/tendermint/libs/bits/types.proto index 1ea81d33f..3111d113a 100644 --- a/proto/tendermint/libs/bits/types.proto +++ b/proto/tendermint/libs/bits/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.libs.bits; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits"; + message BitArray { int64 bits = 1; repeated uint64 elems = 2; diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto index 7fa53ef79..b55d9717b 100644 --- a/proto/tendermint/mempool/types.proto +++ b/proto/tendermint/mempool/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.mempool; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool"; + message Txs { repeated bytes txs = 1; } diff --git a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto index 62abd4f5f..b12de6c82 100644 --- a/proto/tendermint/p2p/conn.proto +++ b/proto/tendermint/p2p/conn.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.p2p; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + import "gogoproto/gogo.proto"; import "tendermint/crypto/keys.proto"; diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto index 374047b0f..545743444 100644 --- a/proto/tendermint/p2p/pex.proto +++ b/proto/tendermint/p2p/pex.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.p2p; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + import "gogoproto/gogo.proto"; message PexAddress { diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index e4e86434a..faccd59d2 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.p2p; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto index 12f7a1d23..94e22e834 100644 --- a/proto/tendermint/statesync/types.proto +++ b/proto/tendermint/statesync/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.statesync; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; + import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/types/params.proto"; diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto index 8a713b7dc..84e9bb15d 100644 --- a/proto/tendermint/types/block.proto +++ b/proto/tendermint/types/block.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/types/evidence.proto"; diff --git a/proto/tendermint/types/canonical.proto b/proto/tendermint/types/canonical.proto index 58d8c44e9..e88fd6ffe 100644 --- a/proto/tendermint/types/canonical.proto +++ 
b/proto/tendermint/types/canonical.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "google/protobuf/timestamp.proto"; diff --git a/proto/tendermint/types/events.proto b/proto/tendermint/types/events.proto index 1ef715872..a1e5cc498 100644 --- a/proto/tendermint/types/events.proto +++ b/proto/tendermint/types/events.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + message EventDataRoundState { int64 height = 1; int32 round = 2; diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto index d42c84363..44ef70cf6 100644 --- a/proto/tendermint/types/evidence.proto +++ b/proto/tendermint/types/evidence.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "tendermint/types/types.proto"; diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index a87670c9f..c5a9e048f 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index d9e6973e9..bc2c53196 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "tendermint/crypto/proof.proto"; diff --git a/proto/tendermint/types/types.proto.intermediate b/proto/tendermint/types/types.proto.intermediate new file mode 100644 index 000000000..280cd7133 --- /dev/null +++ b/proto/tendermint/types/types.proto.intermediate @@ -0,0 +1,192 @@ +syntax = "proto3"; +package tendermint.types; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "tendermint/crypto/proof.proto"; +import "tendermint/version/types.proto"; +import "tendermint/types/validator.proto"; + +// This file is a temporary workaround to enable development during the ABCI++ +// project. This file should be deleted and any references to it removed when +// the ongoing work on ABCI++ is completed. +// +// This file supports building of the `tendermint.abci` proto package. +// For more information, see https://github.com/tendermint/tendermint/issues/8066 + +// BlockIdFlag indicates which BlockID the signature is for +enum BlockIDFlag { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + BLOCK_ID_FLAG_UNKNOWN = 0 + [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; + BLOCK_ID_FLAG_ABSENT = 1 + [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + BLOCK_ID_FLAG_COMMIT = 2 + [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; +} + +// SignedMsgType is a type of signed message in the consensus. 
+enum SignedMsgType { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + SIGNED_MSG_TYPE_UNKNOWN = 0 + [(gogoproto.enumvalue_customname) = "UnknownType"]; + // Votes + SIGNED_MSG_TYPE_PREVOTE = 1 + [(gogoproto.enumvalue_customname) = "PrevoteType"]; + SIGNED_MSG_TYPE_PRECOMMIT = 2 + [(gogoproto.enumvalue_customname) = "PrecommitType"]; + + // Proposals + SIGNED_MSG_TYPE_PROPOSAL = 32 + [(gogoproto.enumvalue_customname) = "ProposalType"]; +} + +// PartSetHeader +message PartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +message Part { + uint32 index = 1; + bytes bytes = 2; + tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; +} + +// BlockID +message BlockID { + bytes hash = 1; + PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +// -------------------------------- + +// Header defines the structure of a Tendermint block header. +message Header { + // basic block info + tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; + google.protobuf.Timestamp time = 4 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // prev block info + BlockID last_block_id = 5 [(gogoproto.nullable) = false]; + + // hashes of block data + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions + + // hashes from the app output from the prev block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = + 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block +} + +// Data contains the set of transactions included in the block +message Data { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + repeated bytes txs = 1; +} + +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + BlockID block_id = 4 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "BlockID" + ]; // zero if vote is nil. + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + bytes signature = 8; + VoteExtension vote_extension = 9; +} + +// VoteExtension is app-defined additional information to the validator votes. +message VoteExtension { + bytes app_data_to_sign = 1; + bytes app_data_self_authenticating = 2; +} + +// VoteExtensionToSign is a subset of VoteExtension that is signed by the validator's private key. +// VoteExtensionToSign is extracted from an existing VoteExtension. +message VoteExtensionToSign { + bytes app_data_to_sign = 1; +} + +// Commit contains the evidence that a block was committed by a set of +// validators.
+message Commit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; +} + +// CommitSig is a part of the Vote included in a Commit. +message CommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; + VoteExtensionToSign vote_extension = 5; +} + +message Proposal { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + int32 pol_round = 4; + BlockID block_id = 5 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 6 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 7; +} + +message SignedHeader { + Header header = 1; + Commit commit = 2; +} + +message LightBlock { + SignedHeader signed_header = 1; + tendermint.types.ValidatorSet validator_set = 2; +} + +message BlockMeta { + BlockID block_id = 1 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + int64 block_size = 2; + Header header = 3 [(gogoproto.nullable) = false]; + int64 num_txs = 4; +} + +// TxProof represents a Merkle proof of the presence of a transaction in the +// Merkle tree. +message TxProof { + bytes root_hash = 1; + bytes data = 2; + tendermint.crypto.Proof proof = 3; +} diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto index 4ab5e4c32..49860b96d 100644 --- a/proto/tendermint/types/validator.proto +++ b/proto/tendermint/types/validator.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.types; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + import "gogoproto/gogo.proto"; import "tendermint/crypto/keys.proto"; diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto index d7d4cc09f..37124dd4e 100644 --- a/proto/tendermint/version/types.proto +++ b/proto/tendermint/version/types.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package tendermint.version; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; + import "gogoproto/gogo.proto"; // Consensus captures the consensus rules for processing a block in the diff --git a/rpc/client/eventstream/eventstream_test.go b/rpc/client/eventstream/eventstream_test.go index 110dc8a90..ca27734e2 100644 --- a/rpc/client/eventstream/eventstream_test.go +++ b/rpc/client/eventstream/eventstream_test.go @@ -90,6 +90,55 @@ func TestStream_lostItem(t *testing.T) { s.stopWait() } +func TestMinPollTime(t *testing.T) { + defer leaktest.Check(t) + + s := newStreamTester(t, ``, eventlog.LogSettings{ + WindowSize: 30 * time.Second, + }, nil) + + s.publish("bad", "whatever") + + // Waiting for an item on a log with no matching events incurs a minimum + // wait time and reports no events. + ctx := context.Background() + filter := &coretypes.EventFilter{Query: `tm.event = 'good'`} + var zero cursor.Cursor + + t.Run("NoneMatch", func(t *testing.T) { + start := time.Now() + + // Request a very short delay, and affirm we got the server's minimum. 
+ rsp, err := s.env.Events(ctx, filter, 1, zero, zero, 10*time.Millisecond) + if err != nil { + t.Fatalf("Events failed: %v", err) + } else if elapsed := time.Since(start); elapsed < time.Second { + t.Errorf("Events returned too quickly: got %v, wanted 1s", elapsed) + } else if len(rsp.Items) != 0 { + t.Errorf("Events returned %d items, expected none", len(rsp.Items)) + } + }) + + s.publish("good", "whatever") + + // Waiting for an available matching item incurs no delay. + t.Run("SomeMatch", func(t *testing.T) { + start := time.Now() + + // Request a long-ish delay and affirm we don't block for it. + // Check for this by ensuring we return sooner than the minimum delay, + // since we don't know the exact timing. + rsp, err := s.env.Events(ctx, filter, 1, zero, zero, 10*time.Second) + if err != nil { + t.Fatalf("Events failed: %v", err) + } else if elapsed := time.Since(start); elapsed > 500*time.Millisecond { + t.Errorf("Events returned too slowly: got %v, wanted immediate", elapsed) + } else if len(rsp.Items) == 0 { + t.Error("Events returned no items, wanted at least 1") + } + }) +} + // testItem is a wrapper for comparing item results in a friendly output format // for the cmp package. type testItem struct { diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 1591862cf..163093c84 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -49,7 +49,7 @@ func TestHTTPSimple(t *testing.T) { if err != nil { log.Fatal(err) } - if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { + if bres.CheckTx.IsErr() || bres.TxResult.IsErr() { log.Fatal("BroadcastTxCommit transaction failed") } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index acc54889f..4b55d36e6 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -90,7 +90,7 @@ type SignClient interface { ) (*coretypes.ResultTxSearch, error) // BlockSearch defines a method to search for a paginated set of blocks by - // BeginBlock and EndBlock event search criteria. + // FinalizeBlock event search criteria. 
BlockSearch( ctx context.Context, query string, diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 1d04fa4cd..8c652444b 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -56,7 +56,7 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes return &res, nil } fb := a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) - res.DeliverTx = *fb.Txs[0] + res.TxResult = *fb.TxResults[0] res.Height = -1 // TODO return &res, nil } diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 18fbbf6a9..489133a5b 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -38,8 +38,8 @@ func TestABCIMock(t *testing.T) { BroadcastCommit: mock.Call{ Args: goodTx, Response: &coretypes.ResultBroadcastTxCommit{ - CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, - DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")}, + CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, + TxResult: abci.ExecTxResult{Data: bytes.HexBytes("deliver")}, }, Error: errors.New("bad tx"), }, @@ -76,7 +76,7 @@ func TestABCIMock(t *testing.T) { require.NoError(t, err, "%+v", err) assert.EqualValues(t, 0, bres.CheckTx.Code) assert.EqualValues(t, "stand", bres.CheckTx.Data) - assert.EqualValues(t, "deliver", bres.DeliverTx.Data) + assert.EqualValues(t, "deliver", bres.TxResult.Data) } func TestABCIRecorder(t *testing.T) { @@ -179,8 +179,8 @@ func TestABCIApp(t *testing.T) { res, err := m.BroadcastTxCommit(ctx, types.Tx(tx)) require.NoError(t, err) assert.True(t, res.CheckTx.IsOK()) - require.NotNil(t, res.DeliverTx) - assert.True(t, res.DeliverTx.IsOK()) + require.NotNil(t, res.TxResult) + assert.True(t, res.TxResult.IsOK()) // commit // TODO: This may not be necessary in the future diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 3ad241380..7d3726496 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -324,7 +324,7 @@ func TestClientMethodCalls(t *testing.T) { k, v, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) + require.True(t, bres.TxResult.IsOK()) txh := bres.Height apph := txh + 1 // this is where the tx will be applied to the state @@ -443,7 +443,7 @@ func TestClientMethodCalls(t *testing.T) { bres, err := c.BroadcastTxCommit(ctx, tx) require.NoError(t, err, "%d: %+v", i, err) require.True(t, bres.CheckTx.IsOK()) - require.True(t, bres.DeliverTx.IsOK()) + require.True(t, bres.TxResult.IsOK()) require.Equal(t, 0, pool.Size()) }) diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 7aaf7552c..8968f9868 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -65,12 +65,12 @@ type ResultCommit struct { // ABCI results from a block type ResultBlockResults struct { - Height int64 `json:"height,string"` - TxsResults []*abci.ResponseDeliverTx `json:"txs_results"` - TotalGasUsed int64 `json:"total_gas_used,string"` - FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` - ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` - ConsensusParamUpdates *tmproto.ConsensusParams `json:"consensus_param_updates"` + Height int64 `json:"height,string"` + TxsResults []*abci.ExecTxResult `json:"txs_results"` + TotalGasUsed int64 `json:"total_gas_used,string"` + FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` + ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` + ConsensusParamUpdates 
*tmproto.ConsensusParams `json:"consensus_param_updates"` } // NewResultCommit is a helper to initialize the ResultCommit with @@ -241,10 +241,10 @@ type ResultBroadcastTx struct { // CheckTx and DeliverTx results type ResultBroadcastTxCommit struct { - CheckTx abci.ResponseCheckTx `json:"check_tx"` - DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height,string"` + CheckTx abci.ResponseCheckTx `json:"check_tx"` + TxResult abci.ExecTxResult `json:"tx_result"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height,string"` } // ResultCheckTx wraps abci.ResponseCheckTx. @@ -254,12 +254,12 @@ type ResultCheckTx struct { // Result of querying for a tx type ResultTx struct { - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height,string"` - Index uint32 `json:"index"` - TxResult abci.ResponseDeliverTx `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height,string"` + Index uint32 `json:"index"` + TxResult abci.ExecTxResult `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` } // Result of searching for txs diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index 3a626e43a..ea14a6cf8 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -344,10 +344,6 @@ func (c *WSClient) writeRoutine(ctx context.Context) { defer func() { ticker.Stop() c.conn.Close() - // err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - // } c.wg.Done() }() @@ -430,14 +426,7 @@ func (c *WSClient) readRoutine(ctx context.Context) { // ID. According to the spec, they should be notifications (requests // without IDs). // https://github.com/tendermint/tendermint/issues/2949 - // c.mtx.Lock() - // if _, ok := c.sentIDs[response.ID.(types.JSONRPCIntID)]; !ok { - // c.Logger.Error("unsolicited response ID", "id", response.ID, "expected", c.sentIDs) - // c.mtx.Unlock() - // continue - // } - // delete(c.sentIDs, response.ID.(types.JSONRPCIntID)) - // c.mtx.Unlock() + // // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking // c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop // both readRoutine and writeRoutine diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 19deac607..4c13b322a 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -98,7 +98,7 @@ func StartTendermint( } } - papp := abciclient.NewLocalCreator(app) + papp := abciclient.NewLocalClient(logger, app) tmNode, err := node.New(ctx, conf, logger, papp, nil) if err != nil { return nil, func(_ context.Context) error { cancel(); return nil }, err diff --git a/scripts/abci-gen.sh b/scripts/abci-gen.sh new file mode 100755 index 000000000..fe3728ad4 --- /dev/null +++ b/scripts/abci-gen.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# This file was added during development of ABCI++. This file is a script to allow +# the intermediate proto files to be built while active development proceeds +# on ABCI++. +# This file should be removed when work on ABCI++ is complete. +# For more information, see https://github.com/tendermint/tendermint/issues/8066. 
+set -euo pipefail + +cp ./proto/tendermint/abci/types.proto.intermediate ./proto/tendermint/abci/types.proto +cp ./proto/tendermint/types/types.proto.intermediate ./proto/tendermint/types/types.proto + +MODNAME="$(go list -m)" +find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \ + -exec sh ./scripts/protopackage.sh {} "$MODNAME" ';' + +sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types" + +make proto-gen + +mv ./proto/tendermint/abci/types.pb.go ./abci/types + +echo "proto files have been compiled" + +echo "checking out copied files" + +find proto/tendermint/ -name '*.proto' -not -path "*.intermediate"\ + | xargs -I {} git checkout {} + +find proto/tendermint/ -name '*.pb.go' \ + | xargs -I {} git checkout {} diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh deleted file mode 100755 index 8e121448b..000000000 --- a/scripts/protocgen.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# By default, this script runs against the latest commit to the master branch -# in the Tendermint spec repository. To use this script with a different version -# of the spec repository, run it with the $VERS environment variable set to the -# desired branch name or commit hash from the spec repo. - -: ${VERS:=master} - -echo "fetching proto files" - -# Get shortened ref of commit -REF=$(curl -H "Accept: application/vnd.github.v3.sha" -qL \ - "https://api.github.com/repos/tendermint/spec/commits/${VERS}" \ - | cut -c -7) - -readonly OUTDIR="tendermint-spec-${REF}" -curl -qL "https://api.github.com/repos/tendermint/spec/tarball/${REF}" | tar -xzf - ${OUTDIR}/ - -cp -r ${OUTDIR}/proto/tendermint/* ./proto/tendermint -cp -r ${OUTDIR}/third_party/** ./third_party - -MODNAME="$(go list -m)" -find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \ - -exec sh ./scripts/protopackage.sh {} "$MODNAME" ';' - -# For historical compatibility, the abci file needs to get a slightly different import name -# so that it can be moved into the ./abci/types directory. -sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types" - -buf generate --path proto/tendermint --template ./${OUTDIR}/buf.gen.yaml --config ./${OUTDIR}/buf.yaml - -mv ./proto/tendermint/abci/types.pb.go ./abci/types - -echo "proto files have been compiled" - -echo "removing copied files" - -find ${OUTDIR}/proto/tendermint/ -name *.proto \ - | sed "s/$OUTDIR\/\(.*\)/\1/g" \ - | xargs -I {} rm {} - -rm -rf ${OUTDIR} diff --git a/scripts/protopackage.sh b/scripts/protopackage.sh index a69e758ca..5eace2752 100755 --- a/scripts/protopackage.sh +++ b/scripts/protopackage.sh @@ -16,7 +16,7 @@ if [[ ! -z "$3" ]]; then fi -if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then +if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then sed -i "s/\(package tendermint.*\)/\1\n\noption go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME else sed -i "s/option\s\+go_package\s\+=\s\+.*;/option go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME diff --git a/spec/abci++/README.md b/spec/abci++/README.md index b8b75f46b..38feba9d7 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -20,14 +20,12 @@ for handling all ABCI++ methods. Thus, Tendermint always sends the `Request*` messages and receives the `Response*` messages in return. -All ABCI++ messages and methods are defined in -[protocol buffers](https://github.com/tendermint/tendermint/blob/master/proto/spec/abci/types.proto). 
+All ABCI++ messages and methods are defined in [protocol buffers](../../proto/tendermint/abci/types.proto). This allows Tendermint to run with applications written in many programming languages. This specification is split as follows: -- [Basic concepts and definitions](./abci++_basic_concepts_002_draft.md) - definitions and descriptions - of concepts that are needed to understand other parts of this sepcification. +- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - an overview of the interface and the concepts needed to understand other parts of this specification. - [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods and message types. - [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements diff --git a/spec/abci++/abci++_basic_concepts_002_draft.md b/spec/abci++/abci++_basic_concepts_002_draft.md index f2fda37dc..86f235e9c 100644 --- a/spec/abci++/abci++_basic_concepts_002_draft.md +++ b/spec/abci++/abci++_basic_concepts_002_draft.md @@ -1,51 +1,259 @@ --- order: 1 -title: Basic concepts and definitions +title: Overview and basic concepts --- -# Basic concepts and definitions +## Outline +- [ABCI++ vs. ABCI](#abci-vs-abci) +- [Methods overview](#methods-overview) + - [Consensus methods](#consensus-methods) + - [Mempool methods](#mempool-methods) + - [Info methods](#info-methods) + - [State-sync methods](#state-sync-methods) +- [Next-block execution vs. same-block execution](#next-block-execution-vs-same-block-execution) + - [Tendermint timeouts](#tendermint-timeouts-in-same-block-execution) +- [Determinism](#determinism) +- [Errors](#errors) +- [Events](#events) +- [Evidence](#evidence) -## Connections +# Overview and basic concepts -ABCI++ applications can run either within the _same_ process as the Tendermint -state-machine replication engine, or as a _separate_ process from the state-machine -replication engine. When run within the same process, Tendermint will call the ABCI++ -application methods directly as Go method calls. +## ABCI++ vs. ABCI +[↑ Back to Outline](#outline) -When Tendermint and the ABCI++ application are run as separate processes, Tendermint -opens four connections to the application for ABCI++ methods. The connections each -handle a subset of the ABCI++ method calls. These subsets are defined as follows: +With ABCI, the application can only act at one phase in consensus, immediately after a block has been finalized. This restriction prevents the application from implementing numerous features, including many scalability improvements that are now better understood than when ABCI was first written. For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to". This includes optimizations such as tx-level signature aggregation, state transition proofs, etc. Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot require validators to do more than just finalize txs. This includes features such as threshold cryptography and guaranteed IBC connection attempts. -### **Consensus** connection +ABCI++ overcomes these limitations by allowing the application to intervene at three key places of block execution.
The new interface allows block proposers to perform application-dependent work in a block through the `PrepareProposal` method; validators to perform application-dependent work in a proposed block through the `ProcessProposal` method; and applications to require their validators do more than just validate blocks, e.g., validator guaranteed IBC connection attempts, through the `ExtendVote` and `VerifyVoteExtension` methods. Furthermore, ABCI++ renames {`BeginBlock`, [`DeliverTx`], `EndBlock`} to `FinalizeBlock`, as a simplified way to deliver a decided block to the Application. -* Driven by a consensus protocol and is responsible for block execution. -* Handles the `InitChain`, `PrepareProposal`, `ProcessProposal`, `ExtendVote`, - `VerifyVoteExtension`, and `FinalizeBlock` method calls. +## Methods overview +[↑ Back to Outline](#outline) -### **Mempool** connection +Methods can be classified into four categories: consensus, mempool, info, and state-sync. -* For validating new transactions, before they're shared or included in a block. -* Handles the `CheckTx` calls. +### Consensus/block execution methods -### **Info** connection +The first time a new blockchain is started, Tendermint calls +`InitChain`. From then on, method `FinalizeBlock` is executed at the end of each +block, resulting in an updated Application state. +During consensus execution of a block height, before method `FinalizeBlock` is +called, methods `PrepareProposal`, `ProcessProposal`, `ExtendVote`, and +`VerifyVoteExtension` may be called several times. +See [Tendermint's expected behavior](abci++_tmint_expected_behavior_002_draft.md) +for details on the possible call sequences of these methods. + +* [**InitChain:**](./abci++_methods_002_draft.md#initchain) This method initializes the blockchain. Tendermint calls it once upon genesis. + +* [**PrepareProposal:**](./abci++_methods_002_draft.md#prepareproposal) It allows the block proposer to perform application-dependent work in a block before using it as its proposal. This enables, for instance, batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. Method `PrepareProposal` is called every time Tendermint is about to send +a proposal message, but no previous proposal has been locked at Tendermint level. +Tendermint gathers outstanding transactions from the mempool, generates a block header, and uses +them to create a block to propose. Then, it calls `RequestPrepareProposal` +with the newly created proposal, called _raw proposal_. The Application can +make changes to the raw proposal, such as modifying transactions, and returns +the (potentially) modified proposal, called _prepared proposal_ in the +`Response*` call. The logic modifying the raw proposal can be non-deterministic. + +* [**ProcessProposal:**](./abci++_methods_002_draft.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. This enables features such as allowing validators to reject a block according to whether the state machine deems it valid, and changing the block execution pipeline. Tendermint calls it when it receives a proposal and it is not locked on a block. The Application cannot +modify the proposal at this point but can reject it if it realizes it is invalid. +If that is the case, Tendermint will prevote `nil` on the proposal, which has +strong liveness implications for Tendermint. 
As a general rule, the Application +SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of +the proposal is invalid (e.g., an invalid transaction); the Application can +ignore the invalid part of the prepared proposal at block execution time. + +* [**ExtendVote:**](./abci++_methods_002_draft.md#extendvote) It allows applications to force their validators to do more than just validate within consensus. `ExtendVote` allows applications to add non-deterministic data, opaque to Tendermint, to precommit messages (the final round of voting). +The data, called _vote extension_, will also be made available to the +application in the next height, along with the vote it is extending, in the rounds +where the local process is the proposer. +The Application may also choose not to include any vote extension. +Tendermint calls it when it is about to send a non-`nil` precommit message. + +* [**VerifyVoteExtension:**](./abci++_methods_002_draft.md#verifyvoteextension) It allows validators to validate the vote extension data attached to a precommit message. If the validation fails, the precommit message will be deemed invalid and ignored +by Tendermint. This has a negative impact on Tendermint's liveness, i.e., if vote extensions repeatedly cannot be verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently many (+2/3) of the validators send precommit votes for that block. Thus, `VerifyVoteExtension` should be used with special care. +As a general rule, an Application that detects an invalid vote extension SHOULD +accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. Tendermint calls it when +a process receives a precommit message with a (possibly empty) vote extension. + +* [**FinalizeBlock:**](./abci++_methods_002_draft.md#finalizeblock) It delivers a decided block to the Application. The Application must execute the transactions in the block in order and update its state accordingly. Cryptographic commitments to the block and transaction results, via the corresponding +parameters in `ResponseFinalizeBlock`, are included in the header of the next block. Tendermint calls it when a new block is decided. + +### Mempool methods + +* [**CheckTx:**](./abci++_methods_002_draft.md#checktx) This method allows the Application to validate transactions against its current state, e.g., checking signatures and account balances. If a transaction passes the validation, then Tendermint adds it to its local mempool, discarding it otherwise. Tendermint calls it when it receives a new transaction either coming from an external user or another node. Furthermore, Tendermint can be configured to re-run `CheckTx` on the transactions remaining in its mempool after a block is decided (i.e., after `FinalizeBlock`). + +### Info methods + +* [**Info:**](./abci++_methods_002_draft.md#info) Used to sync Tendermint with the Application during a handshake that happens on startup. + +* [**Query:**](./abci++_methods_002_draft.md#query) Clients can use this method to query the Application for information about the application state. + +### State-sync methods + +State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying +state machine snapshots instead of replaying historical blocks. For more details, see the +[state sync section](../p2p/messages/state-sync.md). + +New nodes will discover and request snapshots from other nodes in the P2P network. +A Tendermint node that receives a request for snapshots from a peer will call +`ListSnapshots` on its Application.
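The shape of this exchange is easiest to see from the application side. Below is a minimal Go sketch of a `ListSnapshots` handler, written against the ABCI types in this change; the `app` struct and its in-memory `snapshots` index are hypothetical stand-ins for whatever bookkeeping a real application keeps:

```go
package app

import abci "github.com/tendermint/tendermint/abci/types"

// snapshotMeta is hypothetical bookkeeping an application might keep for each
// snapshot it has taken; the fields mirror the Snapshot message above.
type snapshotMeta struct {
	Height   uint64 // height at which the snapshot was taken
	Format   uint32 // application-specific snapshot format
	Chunks   uint32 // number of chunks in the snapshot
	Hash     []byte // snapshot hash, equal only for identical snapshots
	Metadata []byte // arbitrary application metadata
}

// app is a hypothetical ABCI application with an in-memory snapshot index.
type app struct {
	snapshots []snapshotMeta
}

// ListSnapshots returns metadata about locally held snapshots. The snapshot
// contents themselves are served later, chunk by chunk, via LoadSnapshotChunk.
func (a *app) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots {
	resp := abci.ResponseListSnapshots{}
	for _, s := range a.snapshots {
		resp.Snapshots = append(resp.Snapshots, &abci.Snapshot{
			Height:   s.Height,
			Format:   s.Format,
			Chunks:   s.Chunks,
			Hash:     s.Hash,
			Metadata: s.Metadata,
		})
	}
	return resp
}
```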
The Application returns the list of locally available snapshots. +Note that the list does not contain the actual snapshot but metadata about it: the height at which the snapshot was taken, application-specific verification data, and more (see [snapshot data type](./abci++_methods_002_draft.md#snapshot) for more details). After receiving a list of available snapshots from a peer, the new node can offer any of the snapshots in the list to its local Application via the `OfferSnapshot` method. At this point, the Application can check the validity of the snapshot metadata. -* For initialization and for queries from the user. -* Handles the `Info` and `Query` calls. +Snapshots may be quite large and are thus broken into smaller "chunks" that can be +assembled into the whole snapshot. Once the Application accepts a snapshot and +begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. +The node providing "chunks" will fetch them from its local Application using +the `LoadSnapshotChunk` method. + +As the new node receives "chunks", it will apply them sequentially to the local +application with `ApplySnapshotChunk`. When all chunks have been applied, the +Application's `AppHash` is retrieved via an `Info` query. +To ensure that the sync proceeded correctly, Tendermint compares the local Application's `AppHash` to the `AppHash` stored on the blockchain (verified via +[light client verification](../light-client/verification/README.md)). + +In summary: + +* [**ListSnapshots:**](./abci++_methods_002_draft.md#listsnapshots) Used by nodes to discover available snapshots on peers. -### **Snapshot** connection +* [**LoadSnapshotChunk:**](./abci++_methods_002_draft.md#loadsnapshotchunk) Used by Tendermint to retrieve snapshot chunks from the application to send to peers. -* For serving and restoring [state sync snapshots](../abci/apps.md#state-sync). -* Handles the `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` calls. +* [**OfferSnapshot:**](./abci++_methods_002_draft.md#offersnapshot) When a node receives a snapshot from a peer, Tendermint uses this method to offer the snapshot to the Application. -Additionally, there is a `Flush` method that is called on every connection, -and an `Echo` method that is just for debugging. +* [**ApplySnapshotChunk:**](./abci++_methods_002_draft.md#applysnapshotchunk) Used by Tendermint to hand snapshot chunks to the Application. ->**TODO** Figure out what to do with this. +### Other methods + +Additionally, there is a [**Flush**](./abci++_methods_002_draft.md#flush) method that is called on every connection, +and an [**Echo**](./abci++_methods_002_draft.md#echo) method that is just for debugging. More details on managing state across connections can be found in the section on [ABCI Applications](../abci/apps.md). +## Next-block execution vs. same-block execution +[↑ Back to Outline](#outline) + +In the original ABCI protocol, the only moment when the Application had access to a +block was after it was decided.
This led to a block execution model, called _next-block +execution_, where some fields hashed in a block header refer to the execution of the +previous block, namely: + +* the Merkle root of the Application's state +* the transaction results +* the consensus parameter updates +* the validator updates + +With ABCI++, an Application may decide to keep using the next-block execution model by doing all its processing in `FinalizeBlock`; +however, the newly introduced methods `PrepareProposal` and `ProcessProposal` allow +for a new execution model, called _same-block execution_. An Application implementing +this execution model, upon receiving a raw proposal via `RequestPrepareProposal` +and potentially modifying its transaction list, +fully executes the resulting prepared proposal as though it were the decided block. +The results of the block execution are used as follows: + +* The block execution may generate a set of events. The Application should store these events and return them to Tendermint during the `FinalizeBlock` call if the block is finally decided. +* The Merkle root resulting from executing the prepared proposal is provided in + `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint + will use it in the prepared proposal's header. +* Likewise, the transaction results from executing the prepared proposal are + provided in `ResponsePrepareProposal` and refer to the transactions in the + **current block**. Tendermint will use them to calculate the results hash + in the prepared proposal's header. +* The consensus parameter updates and validator updates are also provided in + `ResponsePrepareProposal` and reflect the result of the prepared proposal's + execution. They come into force at height H+1 (as opposed to the H+2 rule + in the next-block execution model). + +If the Application decides to keep the next-block execution model, it will not +provide any data in `ResponsePrepareProposal`, other than an optionally modified +transaction list. + +In the long term, the execution model will be set in a new boolean parameter +*same_block* in `ConsensusParams`. +It **must not** be changed once the blockchain has started unless the Application +developers _really_ know what they are doing. +However, modifying the `ConsensusParams` structure cannot be done lightly if we are to +preserve blockchain compatibility. Therefore, we need an interim solution until +soft upgrades are specified and implemented in Tendermint. This somewhat _unsafe_ +solution consists in Tendermint assuming same-block execution if the Application +fills the above-mentioned fields in `ResponsePrepareProposal`. + +### Tendermint timeouts in same-block execution + +The new same-block execution mode requires the Application to fully execute the +prepared block at `PrepareProposal` time. This execution is synchronous, so +Tendermint cannot make progress until the Application returns from `PrepareProposal`. +This lies on Tendermint's critical path: if the Application takes a long time +executing the block, the default value of _TimeoutPropose_ might not be sufficient +to accommodate the long block execution time, and non-proposer processes might time +out and prevote `nil`, thus starting a further round unnecessarily. + +The Application is best suited to provide a value for _TimeoutPropose_ so +that the block execution time upon `PrepareProposal` fits well in the propose +timeout interval. + +Currently, the Application can override the value of _TimeoutPropose_ via the +`config.toml` file.
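For example, assuming the node's `config` package continues to expose this setting, an operator could widen the timeout when generating the node configuration; the sketch below is illustrative only, and the six-second value is an arbitrary, workload-dependent choice:

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Start from the defaults and widen the propose timeout so that a
	// long-running PrepareProposal (same-block execution) fits inside it.
	cfg := config.DefaultConfig()
	cfg.Consensus.TimeoutPropose = 6 * time.Second // arbitrary, workload-dependent

	// cfg would then be written out as config.toml, or passed to node
	// construction, in whatever way the node is normally set up.
	_ = cfg
}
```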
In the future, `ConsensusParams` will have an extra field +with the current _TimeoutPropose_ value so that the Application can adapt it at every height. + +## Determinism +[↑ Back to Outline](#outline) + +ABCI++ applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus engine. This means block execution +over the Consensus Connection must be strictly deterministic: given the same +ordered set of transactions, all nodes will compute identical responses, for all +successive `FinalizeBlock` calls. This is critical because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that application state is not exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The Application must only change its state based on input +from block execution (`FinalizeBlock` calls), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +Some Applications may choose to execute the blocks that are about to be proposed +(via `PrepareProposal`), or those that the Application is asked to validate +(via `ProcessProposal`). However, the state changes caused by processing those +proposed blocks must never replace the previous state until `FinalizeBlock` confirms +the block decided. + +Additionally, vote extensions or the validation thereof (via `ExtendVote` or +`VerifyVoteExtension`) must _never_ have side effects on the current state. +They can only be used when their data is provided in a `RequestPrepareProposal` call. + +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +* Hardware failures + * Cosmic rays, overheating, etc. +* Node-dependent state + * Random numbers + * Time +* Underspecification + * Library version changes + * Race conditions + * Floating point numbers + * JSON or protobuf serialization + * Iterating through hash-tables/maps/dictionaries +* External Sources + * Filesystem + * Network calls (eg. some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for original discussion. + +Note that some methods (`Query, CheckTx, FinalizeBlock`) return +explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is +intended for the literal output from the Application's logger, while the +`Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + ## Errors +[↑ Back to Outline](#outline) The `Query`, and `CheckTx` methods include a `Code` field in their `Response*`. The `Code` field is also included in type `TxResult`, used by @@ -75,8 +283,6 @@ The handling of non-zero response codes by Tendermint is described below ### `CheckTx` -The `CheckTx` ABCI++ method controls what transactions are considered for inclusion -in a block. When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated transaction will not be added to Tendermint's mempool or it will be removed if it is already included. 
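To illustrate the mempool-side semantics, here is a minimal sketch of a `CheckTx` handler that rejects transactions failing a cheap, stateless check; `codeBadTx` is a hypothetical application-defined code, and only code 0 is treated as valid:

```go
package app

import abci "github.com/tendermint/tendermint/abci/types"

// codeBadTx is a hypothetical application-defined error code; any non-zero
// Code keeps the transaction out of the mempool.
const codeBadTx uint32 = 2

type mempoolApp struct{}

// CheckTx rejects transactions that fail a cheap, stateless check. On a
// non-zero Code, Tendermint does not add the transaction to its mempool
// (or evicts it on re-check), so it is never gossiped or proposed.
func (mempoolApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	if len(req.Tx) == 0 {
		return abci.ResponseCheckTx{
			Code: codeBadTx,
			Log:  "rejecting empty transaction", // nondeterministic, for operators
		}
	}
	return abci.ResponseCheckTx{Code: abci.CodeTypeOK}
}
```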
@@ -91,15 +297,15 @@ Tendermint consensus. ### `Query` -The `Query` ABCI++ method queries the Application for information about application state. When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is returned directly to the client that initiated the query. ## Events +[↑ Back to Outline](#outline) Method `CheckTx` includes an `Events` field in its `Response*`. Method `FinalizeBlock` includes an `Events` field at the top level in its -`Response*`, and one `tx_events` field per transaction included in the block. +`Response*`, and one `events` field per transaction included in the block. Applications may respond to these ABCI++ methods with a set of events. Events allow applications to associate metadata about ABCI++ method execution with the transactions and blocks this metadata relates to. @@ -171,11 +377,12 @@ Example: } ``` -## EvidenceType +## Evidence +[↑ Back to Outline](#outline) Tendermint's security model relies on the use of "evidence". Evidence is proof of -malicious behaviour by a network participant. It is the responsibility of Tendermint -to detect such malicious behaviour. When malicious behavior is detected, Tendermint +malicious behavior by a network participant. It is the responsibility of Tendermint +to detect such malicious behavior. When malicious behavior is detected, Tendermint will gossip evidence of the behavior to other nodes and commit the evidence to the chain once it is verified by all validators. This evidence will then be passed on to the Application through ABCI++. It is the responsibility of the @@ -195,207 +402,3 @@ There are two forms of evidence: Duplicate Vote and Light Client Attack. More information can be found in either [data structures](../core/data_structures.md) or [accountability](../light-client/accountability/) -## Vote Extensions - -According to the Tendermint algorithm, a proposed block needs at least a predefined -number of precommit votes in order to be decided. Tendermint gathers all the valid -precommit votes for the decided block that it receives before the block is decided, -and then includes these votes in the proposed block for the next height whenever -the local process is the proposer of the round. - -When Tendermint's consensus is about to send a non-`nil` precommit message, it calls -method `ExtendVote`, which gives the Application the opportunity to include -non-deterministic data, opaque to Tendermint, that will be attached to the precommit -message. The data, called _vote extension_, will also be part of the proposed block -in the next height, along with the vote it is extending. - -The vote extension data is split into two parts, one signed by Tendermint as part -of the vote data structure, and the other (optionally) signed by the Application. -The Application may also choose not to include any vote extension. -When another process receives a precommit message with a vote extension, it calls -method `VerifyVoteExtension` so that the Application can validate the data received. -If the validation fails, the precommit message will be deemed invalid and ignored -by Tendermint. This has negative impact on Tendermint's liveness, i.e., if repeatedly vote extensions by correct validators cannot be verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently many (+2/3) of the validators send precommit votes for that block. Thus, `VerifyVoteExtension` should only be used with special care. 
-As a general rule, an Application that detects an invalid vote extension SHOULD -accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. - -## Determinism - -ABCI++ applications must implement deterministic finite-state machines to be -securely replicated by the Tendermint consensus engine. This means block execution -over the Consensus Connection must be strictly deterministic: given the same -ordered set of requests, all nodes will compute identical responses, for all -successive `FinalizeBlock` calls. This is critical, because the -responses are included in the header of the next block, either via a Merkle root -or directly, so all nodes must agree on exactly what they are. - -For this reason, it is recommended that applications not be exposed to any -external user or process except via the ABCI connections to a consensus engine -like Tendermint Core. The Application must only change its state based on input -from block execution (`FinalizeBlock` calls), and not through -any other kind of request. This is the only way to ensure all nodes see the same -transactions and compute the same results. - -Some Applications may choose to execute the blocks that are about to be proposed -(via `PrepareProposal`), or those that the Application is asked to validate -(via `Processproposal`). However the state changes caused by processing those -proposed blocks must never replace the previous state until `FinalizeBlock` confirms -the block decided. - -Additionally, vote extensions or the validation thereof (via `ExtendVote` or -`VerifyVoteExtension`) must _never_ have side effects on the current state. -They can only be used when their data is included in a block. - -If there is some non-determinism in the state machine, consensus will eventually -fail as nodes disagree over the correct values for the block header. The -non-determinism must be fixed and the nodes restarted. - -Sources of non-determinism in applications may include: - -* Hardware failures - * Cosmic rays, overheating, etc. -* Node-dependent state - * Random numbers - * Time -* Underspecification - * Library version changes - * Race conditions - * Floating point numbers - * JSON or protobuf serialization - * Iterating through hash-tables/maps/dictionaries -* External Sources - * Filesystem - * Network calls (eg. some external REST API service) - -See [#56](https://github.com/tendermint/abci/issues/56) for original discussion. - -Note that some methods (`Query, CheckTx, FinalizeBlock`) return -explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is -intended for the literal output from the Application's logger, while the -`Info` is any additional info that should be returned. These are the only fields -that are not included in block header computations, so we don't need agreement -on them. All other fields in the `Response*` must be strictly deterministic. - -## Block Execution - -The first time a new blockchain is started, Tendermint calls -`InitChain`. From then on, method `FinalizeBlock` is executed at the end of each -block, resulting in an updated Application state. -During consensus execution of a block height, before method `FinalizeBlock` is -called, methods `PrepareProposal`, `ProcessProposal`, `ExtendVote`, and -`VerifyVoteExtension` may be called a number of times. -See [Tendermint's expected behavior](abci++_tmint_expected_behavior_002_draft.md) -for details on the possible call sequences of these methods. 
- -Method `PrepareProposal` is called every time Tendermint is about to send -a proposal message, but no previous proposal has been locked at Tendermint level. -Tendermint gathers outstanding transactions from the mempool -(see [PrepareProposal](#PrepareProposal)), generates a block header and uses -them to create a block to propose. Then, it calls `RequestPrepareProposal` -with the newly created proposal, called _raw proposal_. The Application can -make changes to the raw proposal, such as modifying transactions, and returns -the (potentially) modified proposal, called _prepared proposal_ in the -`Response*` call. The logic modifying the raw proposal can be non-deterministic. - -When Tendermint receives a prepared proposal it uses method `ProcessProposal` -to inform the Application of the proposal just received. The Application cannot -modify the proposal at this point but can reject it if it realises it is invalid. -If that is the case, Tendermint will prevote `nil` on the proposal, which has -strong liveness implications for Tendermint. As a general rule, the Application -SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of -the proposal is invalid (e.g., an invalid transaction); the Application can later -ignore the invalid part of the prepared proposal at block execution time. - -Cryptographic commitments to the block and transaction results, via the corresponding -parameters in `FinalizeBlockResponse` are included in the header of the next block. - -## Next-block execution and same-block execution - -With ABCI++ predecessor, ABCI, the only moment when the Application had access to a -block was when it was decided. This led to a block execution model, called _next-block -execution_, where some fields hashed in a block header refer to the execution of the -previous block, namely: - -* the merkle root of the Application's state -* the transaction results -* the consensus parameter updates -* the validator updates - -With ABCI++, an Application may decide to keep using the next-block execution model; -however the new methods introduced, `PrepareProposal` and `ProcessProposal` allow -for a new execution model, called _same-block execution_. An Application implementing -this execution model, upon receiving a raw proposal via `RequestPrepareProposal` -and potentially modifying its transaction list, -fully executes the resulting prepared proposal as though it was the decided block. -The results of the block execution are used as follows: - -* the Application keeps the events generated and provides them if `FinalizeBlock` - is finally called on this prepared proposal. -* the merkle root resulting from executing the prepared proposal is provided in - `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint - will use it in the prepared proposal's header. -* likewise, the transaction results from executing the prepared proposal are - provided in `ResponsePrepareProposal` and refer to the transactions in the - **current block**. Tendermint will use them to calculate the results hash - in the prepared proposal's header. -* the consensus parameter updates and validator updates are also provided in - `ResponsePrepareProposal` and reflect the result of the prepared proposal's - execution. They come into force in height H+1 (as opposed to the H+2 rule - in next-block execution model). 
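The result-passing in the list above can be pictured with a toy sketch. The types below are local stand-ins mirroring the fields named in this section (the real proto fields are `app_hash` and `tx_results`); the "execution" is a placeholder, not how a real Application computes its state:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Stand-ins mirroring the response fields discussed above; illustrative only.
type ExecTxResult struct{ Code uint32 }

type ResponsePrepareProposal struct {
	AppHash   []byte
	TxResults []ExecTxResult
}

// prepareProposalSameBlock sketches same-block execution: the prepared
// proposal is fully executed *now*, and the resulting app hash and per-tx
// results are returned so Tendermint can place them in the header of the
// current block (rather than the next one, as in next-block execution).
func prepareProposalSameBlock(txs [][]byte) ResponsePrepareProposal {
	h := sha256.New()
	results := make([]ExecTxResult, len(txs))
	for i, tx := range txs {
		h.Write(tx) // toy "execution": fold every tx into the state hash
		results[i] = ExecTxResult{Code: 0}
	}
	return ResponsePrepareProposal{AppHash: h.Sum(nil), TxResults: results}
}

func main() {
	resp := prepareProposalSameBlock([][]byte{[]byte("a=1"), []byte("b=2")})
	fmt.Printf("app_hash for the current block: %x\n", resp.AppHash)
}
```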
- -If the Application decides to keep the next-block execution model, it will not -provide any data in `ResponsePrepareProposal`, other than an optionally modified -transaction list. - -In the long term, the execution model will be set in a new boolean parameter -*same_block* in `ConsensusParams`. -It should **not** be changed once the blockchain has started, unless the Application -developers _really_ know what they are doing. -However, modifying `ConsensusParams` structure cannot be done lightly if we are to -preserve blockchain compatibility. Therefore we need an interim solution until -soft upgrades are specified and implemented in Tendermint. This somewhat _unsafe_ -solution consists in Tendermint assuming same-block execution if the Application -fills the above mentioned fields in `ResponsePrepareProposal`. - -## Tendermint timeouts in same-block execution - -The new same-block execution mode requires the Application to fully execute the -prepared block at `PrepareProposal` time. This execution is synchronous, so -Tendermint cannot make progress until the Application returns from `PrepareProposal`. -This stands on Tendermint's critical path: if the Application takes a long time -executing the block, the default value of _TimeoutPropose_ might not be sufficient -to accomodate the long block execution time and non-proposer processes might time -out and prevote `nil`, thus starting a further round unnecessarily. - -The Application is the best suited to provide a value for _TimeoutPropose_ so -that the block execution time upon `PrepareProposal` fits well in the propose -timeout interval. - -Currently, the Application can override the value of _TimeoutPropose_ via the -`config.toml` file. In the future, `ConsensusParams` may have an extra field -with the current _TimeoutPropose_ value so that the Application has the possibility -to adapt it at every height. - -## State Sync - -State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying -state machine snapshots instead of replaying historical blocks. For more details, see the -[state sync section](../p2p/messages/state-sync.md). - -New nodes will discover and request snapshots from other nodes in the P2P network. -A Tendermint node that receives a request for snapshots from a peer will call -`ListSnapshots` on its Application to retrieve any local state snapshots. After receiving - snapshots from peers, the new node will offer each snapshot received from a peer -to its local Application via the `OfferSnapshot` method. - -Snapshots may be quite large and are thus broken into smaller "chunks" that can be -assembled into the whole snapshot. Once the Application accepts a snapshot and -begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. -The node providing "chunks" will fetch them from its local Application using -the `LoadSnapshotChunk` method. - -As the new node receives "chunks" it will apply them sequentially to the local -application with `ApplySnapshotChunk`. When all chunks have been applied, the -Application's `AppHash` is retrieved via an `Info` query. The `AppHash` is then -compared to the blockchain's `AppHash` which is verified via -[light client verification](../light-client/verification/README.md). 
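The chunking scheme is easy to picture with a toy example: the serving side splits a snapshot into ordered chunks (what `LoadSnapshotChunk` returns one at a time), and the restoring side applies them sequentially (as `ApplySnapshotChunk` would) before verifying the result. Plain SHA-256 stands in for the `AppHash` check here; this is an illustration of the flow, not the actual snapshot format:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// chunk splits a snapshot into fixed-size pieces, preserving order.
func chunk(snapshot []byte, size int) [][]byte {
	var chunks [][]byte
	for len(snapshot) > 0 {
		n := size
		if n > len(snapshot) {
			n = len(snapshot)
		}
		chunks = append(chunks, snapshot[:n])
		snapshot = snapshot[n:]
	}
	return chunks
}

func main() {
	snapshot := []byte("application state at height 1000")
	want := sha256.Sum256(snapshot)

	// The restoring node applies the chunks sequentially...
	var restored bytes.Buffer
	for _, c := range chunk(snapshot, 8) {
		restored.Write(c)
	}

	// ...and then verifies the reassembled state against the expected hash.
	got := sha256.Sum256(restored.Bytes())
	fmt.Println("restored snapshot verified:", got == want)
}
```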
diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods_002_draft.md index ccd6c2207..834c161bd 100644 --- a/spec/abci++/abci++_methods_002_draft.md +++ b/spec/abci++/abci++_methods_002_draft.md @@ -290,15 +290,10 @@ title: Methods | hash | bytes | The block header's hash of the block to propose. Present for convenience (can be derived from the block header). | 1 | | header | [Header](../core/data_structures.md#header) | The header of the block to propose. | 2 | | txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 3 | - | last_commit_info | [LastCommitInfo](#lastcommitinfo) | Info about the last commit, including the round, the validator list, and which ones signed the last block. | 4 | + | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from Tendermint's data structures. | 4 | | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 5 | | max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 6 | ->**TODO**: Add the changes needed in LastCommitInfo for vote extensions - ->**TODO**: DISCUSS: We need to make clear whether a proposer is also running the logic of a non-proposer node (in particular "ProcessProposal") -From the App's perspective, they'll probably skip ProcessProposal - * **Response**: | Name | Type | Description | Field Number | @@ -309,16 +304,15 @@ From the App's perspective, they'll probably skip ProcessProposal | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 4 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 5 | | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 6 | - | app_signed_updates | repeated bytes | Optional changes to the *app_signed* part of vote extensions. | 7 | * **Usage**: * The first five parameters of `RequestPrepareProposal` are the same as `RequestProcessProposal` and `RequestFinalizeBlock`. * The header contains the height, timestamp, and more - it exactly matches the Tendermint block header. - * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that Tendermint considers to be a good block proposal, called _raw block_. The Application can modify this set via `ResponsePrepareProposal.tx_records` (see [TxRecord](#txrecord)). + * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that Tendermint considers to be a good block proposal, called _raw proposal_. The Application can modify this set via `ResponsePrepareProposal.tx_records` (see [TxRecord](#txrecord)). * In this case, the Application should set `ResponsePrepareProposal.modified_tx` to true. - * The Application _can_ reorder, remove or add transactions to the raw block. Let `tx` be a transaction in `txs`: + * The Application _can_ reorder, remove or add transactions to the raw proposal. Let `tx` be a transaction in `txs`: * If the Application considers that `tx` should not be proposed in this block, e.g., there are other transactions with higher priority, then it should not include it in `tx_records`. In this case, Tendermint won't remove `tx` from the mempool. 
The Application should be extra-careful, as abusing this feature may cause transactions to stay forever in the mempool. * If the Application considers that a `tx` should not be included in the proposal and removed from the mempool, then the Application should include it in `tx_records` and _mark_ it as "REMOVE". In this case, Tendermint will remove `tx` from the mempool. * If the Application wants to add a new transaction, then the Application should include it in `tx_records` and _mark_ it as "ADD". In this case, Tendermint will add it to the mempool. @@ -326,10 +320,6 @@ From the App's perspective, they'll probably skip ProcessProposal > Consider the following example: the Application transforms a client-submitted transaction `t1` into a second transaction `t2`, i.e., the Application asks Tendermint to remove `t1` and add `t2` to the mempool. If a client wants to eventually check what happened to `t1`, it will discover that `t_1` is not in the mempool or in a committed block, getting the wrong idea that `t_1` did not make it into a block. Note that `t_2` _will be_ in a committed block, but unless the Application tracks this information, no component will be aware of it. Thus, if the Application wants traceability, it is its responsability to support it. For instance, the Application could attach to a transformed transaction a list with the hashes of the transactions it derives from. * If the Application modifies the set of transactions, the modified transactions MUST NOT exceed the configured maximum size `RequestPrepareProposal.max_tx_bytes`. * If the Application does not modify the preliminary set of transactions `txs`, then it sets `ResponsePrepareProposal.modified_tx` to false. In this case, Tendermint will ignore the contents of `ResponsePrepareProposal.tx_records`. - * If the Application modifies the *app_signed* part of vote extensions via `ResponsePrepareProposal.app_signed_updates`, - the new total size of those extensions cannot exceed their initial size. - * The Application may choose to not modify the *app_signed* part of vote extensions by leaving parameter - `ResponsePrepareProposal.app_signed_updates` empty. * In same-block execution mode, the Application must provide values for `ResponsePrepareProposal.app_hash`, `ResponsePrepareProposal.tx_results`, `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`, as a result of fully executing the block. @@ -340,7 +330,7 @@ From the App's perspective, they'll probably skip ProcessProposal for blocks `H+1`, and `H+2`. Heights following a validator update are affected in the following way: * `H`: `NextValidatorsHash` includes the new `validator_updates` value. * `H+1`: The validator set change takes effect and `ValidatorsHash` is updated. - * `H+2`: `last_commit_info` is changed to include the altered validator set. + * `H+2`: `local_last_commit` now includes the altered validator set. * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1` even if the change is agreed in block `H`. For more information on the consensus parameters, @@ -414,7 +404,7 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos | hash | bytes | The block header's hash of the proposed block. Present for convenience (can be derived from the block header). | 1 | | header | [Header](../core/data_structures.md#header) | The proposed block's header. 
| 2 | | txs | repeated bytes | List of transactions that have been picked as part of the proposed block. | 3 | - | last_commit_info | [LastCommitInfo](#lastcommitinfo) | Info about the last commit, including the round , the validator list, and which ones signed the last block. | 4 | + | proposed_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the information in the proposed block. | 4 | | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 5 | * **Response**: @@ -495,18 +485,17 @@ When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which * **Response**: - | Name | Type | Description | Field Number | - |-------------------|-------|---------------------------------------------------------------------|--------------| - | app_signed | bytes | Optional information signed by the Application (not by Tendermint). | 1 | - | tendermint_signed | bytes | Optional information signed by Tendermint. | 2 | + | Name | Type | Description | Field Number | + |-------------------|-------|--------------------------------------------|--------------| + | vote_extension | bytes | Optional information signed by Tendermint. | 1 | * **Usage**: - * Both `ResponseExtendVote.app_signed` and `ResponseExtendVote.tendermint_signed` are optional information that will - be attached to the Precommit message. + * `ResponseExtendVote.vote_extension` is optional information that, if present, will be signed by Tendermint and + attached to the Precommit message. * `RequestExtendVote.hash` corresponds to the hash of a proposed block that was made available to the application in a previous call to `ProcessProposal` or `PrepareProposal` for the current height. - * `ResponseExtendVote.app_signed` and `ResponseExtendVote.tendermint_signed` will always be attached to a non-`nil` - Precommit message. If Tendermint is to precommit `nil`, it will not call `RequestExtendVote`. + * `ResponseExtendVote.vote_extension` will only be attached to a non-`nil` Precommit message. If Tendermint is to + precommit `nil`, it will not call `RequestExtendVote`. * The Application logic that creates the extension can be non-deterministic. #### When does Tendermint call it? @@ -520,11 +509,18 @@ then _p_'s Tendermint locks _v_ and sends a Precommit message in the following 1. _p_'s Tendermint sets _lockedValue_ and _validValue_ to _v_, and sets _lockedRound_ and _validRound_ to _r_ 2. _p_'s Tendermint calls `RequestExtendVote` with _id(v)_ (`RequestExtendVote.hash`). The call is synchronous. -3. The Application returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by Tendermint. -4. _p_'s Tendermint includes `ResponseExtendVote.extension` as a new field in the Precommit message. -5. _p_'s Tendermint signs and broadcasts the Precommit message. - -In the cases when _p_'s Tendermint is to broadcast `precommit nil` messages (either _2f+1_ `prevote nil` messages received, or _timeoutPrevote_ triggered), _p_'s Tendermint does **not** call `RequestExtendVote` and will include an empty byte array as vote extension in the `precommit nil` message. +3. The Application optionally returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by Tendermint. +4.
_p_'s Tendermint includes `ResponseExtendVote.extension` in a field of type [CanonicalVoteExtension](#canonicalvoteextension), + it then populates the other fields in [CanonicalVoteExtension](#canonicalvoteextension), and signs the populated + data structure. +5. _p_'s Tendermint constructs and signs the [CanonicalVote](../core/data_structures.md#canonicalvote) structure. +6. _p_'s Tendermint constructs the Precommit message (i.e. [Vote](../core/data_structures.md#vote) structure) + using [CanonicalVoteExtension](#canonicalvoteextension) and [CanonicalVote](../core/data_structures.md#canonicalvote). +7. _p_'s Tendermint broadcasts the Precommit message. + +In the cases when _p_'s Tendermint is to broadcast `precommit nil` messages (either _2f+1_ `prevote nil` messages received, +or _timeoutPrevote_ triggered), _p_'s Tendermint does **not** call `RequestExtendVote` and will not include +a [CanonicalVoteExtension](#canonicalvoteextension) field in the `precommit nil` message. ### VerifyVoteExtension @@ -534,11 +530,10 @@ In the cases when _p_'s Tendermint is to broadcast `precommit nil` messages (eit | Name | Type | Description | Field Number | |-------------------|-------|------------------------------------------------------------------------------------------|--------------| - | app_signed | bytes | Optional information signed by the Application (not by Tendermint). | 1 | - | tendermint_signed | bytes | Optional information signed by Tendermint. | 2 | - | hash | bytes | The header hash of the propsed block that the vote extension refers to. | 3 | - | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension | 4 | - | height | int64 | Height of the block (for sanity check). | 5 | + | hash | bytes | The header hash of the proposed block that the vote extension refers to. | 1 | + | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension. | 2 | + | height | int64 | Height of the block (for sanity check). | 3 | + | vote_extension | bytes | Optional information signed by Tendermint. | 4 | * **Response**: @@ -566,11 +561,8 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou 2. The Application returns _accept_ or _reject_ via `ResponseVerifyVoteExtension.accept`. 3. If the Application returns * _accept_, _p_'s Tendermint will keep the received vote, together with its corresponding - vote extension in its internal data structures. It will be used to: - * calculate field _LastCommitHash_ in the header of the block proposed for height _h + 1_ - (in the rounds where _p_ will be proposer). - * populate _LastCommitInfo_ in calls to `RequestPrepareProposal`, `RequestProcessProposal`, - and `RequestFinalizeBlock` in height _h + 1_. + vote extension in its internal data structures. It will be used to populate the [ExtendedCommitInfo](#extendedcommitinfo) + structure in calls to `RequestPrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. * _reject_, _p_'s Tendermint will deem the Precommit message invalid and discard it.
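A minimal sketch of the accept-versus-use distinction described above, with local stand-in types and a hypothetical 8-byte height-echo encoding for the extension (the real messages live in the ABCI++ proto definitions):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Stand-ins for the draft ABCI++ types; field names follow the tables above
// and are illustrative only.
type RequestVerifyVoteExtension struct {
	Height        int64
	VoteExtension []byte
}

type ResponseVerifyVoteExtension struct{ Accept bool }

// verifyVoteExtension returns the accept/reject decision plus whether the
// Application will actually use the extension in its own logic. Rejecting
// invalidates the whole Precommit and can hurt liveness, so this sketch
// accepts even extensions it cannot use and merely ignores them.
func verifyVoteExtension(req RequestVerifyVoteExtension) (ResponseVerifyVoteExtension, bool) {
	// Hypothetical encoding: an 8-byte big-endian echo of the height.
	usable := len(req.VoteExtension) == 8 &&
		int64(binary.BigEndian.Uint64(req.VoteExtension)) == req.Height
	return ResponseVerifyVoteExtension{Accept: true}, usable
}

func main() {
	ext := make([]byte, 8)
	binary.BigEndian.PutUint64(ext, 42)
	resp, usable := verifyVoteExtension(RequestVerifyVoteExtension{Height: 42, VoteExtension: ext})
	fmt.Println(resp.Accept, usable) // true true

	resp, usable = verifyVoteExtension(RequestVerifyVoteExtension{Height: 42, VoteExtension: []byte("junk")})
	fmt.Println(resp.Accept, usable) // true false: accepted but ignored
}
```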
### FinalizeBlock @@ -579,19 +571,19 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou * **Request**: - | Name | Type | Description | Field Number | - |----------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------| - | hash | bytes | The block header's hash. Present for convenience (can be derived from the block header). | 1 | - | header | [Header](../core/data_structures.md#header) | The block header. | 2 | - | txs | repeated bytes | List of transactions committed as part of the block. | 3 | - | last_commit_info | [LastCommitInfo](#lastcommitinfo) | Info about the last commit, including the round, and the list of validators and which ones signed the last block. | 4 | - | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 5 | + | Name | Type | Description | Field Number | + |----------------------|---------------------------------------------|------------------------------------------------------------------------------------------|--------------| + | hash | bytes | The block header's hash. Present for convenience (can be derived from the block header). | 1 | + | header | [Header](../core/data_structures.md#header) | The block header. | 2 | + | txs | repeated bytes | List of transactions committed as part of the block. | 3 | + | decided_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the block that was just decided. | 4 | + | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 5 | * **Response**: | Name | Type | Description | Field Number | |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| - | block_events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 | + | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 | | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 2 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 4 | @@ -603,7 +595,7 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou * This method is equivalent to the call sequence `BeginBlock`, [`DeliverTx`], `EndBlock`, `Commit` in the previous version of ABCI. * The header exactly matches the Tendermint header of the proposed block. - * The Application can use `RequestFinalizeBlock.last_commit_info` and `RequestFinalizeBlock.byzantine_validators` + * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.byzantine_validators` to determine rewards and punishments for the validators. * The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, before returning control to Tendermint. 
Alternatively, it can commit the candidate state corresponding to the same block @@ -619,7 +611,7 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou for blocks `H+1`, `H+2`, and `H+3`. Heights following a validator update are affected in the following way: - Height `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. - Height `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. - - Height `H+3`: `last_commit_info` is changed to include the altered validator set. + - Height `H+3`: `decided_last_commit` now includes the altered validator set. * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1`. For more information on the consensus parameters, see the [application spec entry on consensus parameters](../abci/apps.md#consensus-parameters). @@ -728,25 +720,16 @@ Most of the data structures used in ABCI are shared [common data structures](../ | DUPLICATE_VOTE | 1 | | LIGHT_CLIENT_ATTACK | 2 | -### LastCommitInfo - -* **Fields**: - - | Name | Type | Description | Field Number | - |-------|--------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------| - | round | int32 | Commit round. Reflects the total amount of rounds it took to come to consensus for the current block. | 1 | - | votes | repeated [VoteInfo](#voteinfo) | List of validators addresses in the last validator set with their voting power and whether or not they signed a vote. | 2 | - ### ConsensusParams * **Fields**: | Name | Type | Description | Field Number | |-----------|---------------------------------------------------------------|------------------------------------------------------------------------------|--------------| - | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | + | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | - | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 | + | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. 
| 4 | ### ProofOps @@ -790,18 +773,47 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |-----------------------------|-------------------------|---------------------------------------------------------------|--------------| - | validator | [Validator](#validator) | A validator | 1 | - | signed_last_block | bool | Indicates whether or not the validator signed the last block | 2 | - | tendermint_signed_extension | bytes | Indicates whether or not the validator signed the last block | 3 | - | app_signed_extension | bytes | Indicates whether or not the validator signed the last block | 3 | + | Name | Type | Description | Field Number | + |-----------------------------|-------------------------|----------------------------------------------------------------|--------------| + | validator | [Validator](#validator) | The validator that sent the vote. | 1 | + | signed_last_block | bool | Indicates whether or not the validator signed the last block. | 2 | * **Usage**: - * Indicates whether a validator signed the last block, allowing for rewards - based on validator availability - * `tendermint_signed_extension` conveys the part of the validator's vote extension that was signed by Tendermint. - * `app_signed_extension` conveys the optional *app_signed* part of the validator's vote extension. + * Indicates whether a validator signed the last block, allowing for rewards based on validator availability. + * This information is typically extracted from a proposed or decided block. + +### ExtendedVoteInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------------------|-------------------------|------------------------------------------------------------------------------|--------------| + | validator | [Validator](#validator) | The validator that sent the vote. | 1 | + | signed_last_block | bool | Indicates whether or not the validator signed the last block. | 2 | + | vote_extension | bytes | Non-deterministic extension provided by the sending validator's Application. | 3 | + +* **Usage**: + * Indicates whether a validator signed the last block, allowing for rewards based on validator availability. + * This information is extracted from Tendermint's data structures in the local process. + * `vote_extension` contains the sending validator's vote extension, which is signed by Tendermint. It can be empty. + +### CommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|--------------------------------|----------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | + | votes | repeated [VoteInfo](#voteinfo) | List of validators' addresses in the last validator set with their voting information. | 2 | + +### ExtendedCommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | + | votes | repeated [ExtendedVoteInfo](#extendedvoteinfo) | List of validators' addresses in the last validator set with their voting information, including vote extensions.
| 2 | ### ExecTxResult @@ -815,7 +827,7 @@ Most of the data structures used in ABCI are shared [common data structures](../ | info | string | Additional information. **May be non-deterministic.** | 4 | | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | | gas_used | int64 | Amount of gas consumed by transaction. | 6 | - | tx_events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | + | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | | codespace | string | Namespace for the `code`. | 8 | ### TxAction @@ -843,3 +855,23 @@ Most of the data structures used in ABCI are shared [common data structures](../ |------------|-----------------------|------------------------------------------------------------------|--------------| | action | [TxAction](#txaction) | What should Tendermint do with this transaction? | 1 | | tx | bytes | Transaction contents | 2 | + +### CanonicalVoteExtension + +>**TODO**: This protobuf message definition is not part of the ABCI++ interface, but rather belongs to the +> Precommit message which is broadcast via P2P. So it is to be moved to the relevant section of the spec. + +* **Fields**: + + | Name | Type | Description | Field Number | + |-----------|--------|--------------------------------------------------------------------------------------------|--------------| + | extension | bytes | Vote extension provided by the Application. | 1 | + | height | int64 | Height in which the extension was provided. | 2 | + | round | int32 | Round in which the extension was provided. | 3 | + | chain_id | string | ID of the blockchain running consensus. | 4 | + | address | bytes | [Address](../core/data_structures.md#address) of the validator that provided the extension | 5 | + +* **Usage**: + * Tendermint is to sign the whole data structure and attach it to a Precommit message + * Upon reception, Tendermint validates the sender's signature and sanity-checks the values of `height`, `round`, and `chain_id`. + Then it sends `extension` to the Application via `RequestVerifyVoteExtension` for verification. diff --git a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md b/spec/abci++/abci++_tmint_expected_behavior_002_draft.md index c408d0ab4..18669a479 100644 --- a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md +++ b/spec/abci++/abci++_tmint_expected_behavior_002_draft.md @@ -10,7 +10,8 @@ title: Tendermint's expected behavior This section describes what the Application can expect from Tendermint. The Tendermint consensus algorithm is designed to protect safety under any network conditions, as long as -less than 1/3 of validators' voting power is byzantine. Most of the time, though, the network will behave synchronously and there will be no byzantine process. In these frequent, benign conditions: +less than 1/3 of validators' voting power is byzantine. Most of the time, though, the network will behave +synchronously and there will be no byzantine process. In these frequent, benign conditions: * Tendermint will decide in round 0; * `PrepareProposal` will be called exactly once at the proposer process of round 0, height _h_; diff --git a/spec/abci/apps.md b/spec/abci/apps.md index 030a3d3c3..d6ec19832 100644 --- a/spec/abci/apps.md +++ b/spec/abci/apps.md @@ -346,6 +346,19 @@ a block minus it's overhead ( ~ `MaxBytes`). Must have `MaxNum > 0`. 
+### SynchronyParams.Precision + +`SynchronyParams.Precision` is a parameter of the Proposer-Based Timestamps algorithm +that configures the acceptable upper bound of clock drift among +all of the nodes on a Tendermint network. Any two nodes on a Tendermint network +are expected to have clocks that differ by at most `Precision`. + +### SynchronyParams.MessageDelay + +`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps +algorithm that configures the acceptable upper bound for transmitting a `Proposal` +message from the proposer to all of the validators on the network. + ### Updates The application may set the ConsensusParams during InitChain, and update them during diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index d8cc96e28..0aca40519 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -230,17 +230,20 @@ enum BlockIDFlag { A vote is a signed message from a validator for a particular block. The vote includes information about the validator signing it. When stored in the blockchain or propagated over the network, votes are encoded in Protobuf. - -| Name | Type | Description | Validation | -|------------------|---------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| -| Type | [SignedMsgType](#signedmsgtype) | Either prevote or precommit. [SignedMsgType](#signedmsgtype) | A Vote is valid if its corresponding fields are included in the enum [signedMsgType](#signedmsgtype) | -| Height | uint64 | Height for which this vote was created for | Must be > 0 | -| Round | int32 | Round that the commit corresponds to. | Must be > 0 | -| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | -| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. | [Time](#time) | -| ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | -| ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | -| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | +The vote extension is not part of the [`CanonicalVote`](#canonicalvote). + +| Name | Type | Description | Validation | +|--------------------|---------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| +| Type | [SignedMsgType](#signedmsgtype) | Either prevote or precommit. [SignedMsgType](#signedmsgtype) | A Vote is valid if its corresponding fields are included in the enum [signedMsgType](#signedmsgtype) | +| Height | uint64 | Height for which this vote was created. | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | +| Timestamp | [Time](#Time) | The time at which a validator signed.
| [Time](#time) | +| ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | +| ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | +| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated block. | Length of signature must be > 0 and < 64 | +| Extension | slice of bytes (`[]byte`) | The vote extension provided by the Application. Only valid for precommit messages. | Length must be 0 if Type != `SIGNED_MSG_TYPE_PRECOMMIT` | +| ExtensionSignature | slice of bytes (`[]byte`) | Signature by the validator over the vote extension. Only valid for precommit messages. | Length must be 0 if Type != `SIGNED_MSG_TYPE_PRECOMMIT`; else length must be > 0 and < 64 | ## CanonicalVote @@ -250,7 +253,7 @@ the fields. ```proto message CanonicalVote { SignedMsgType type = 1; - fixed64 height = 2; + fixed64 height = 2; sfixed64 round = 3; CanonicalBlockID block_id = 4; google.protobuf.Timestamp timestamp = 5; diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index c5e328053..5739f06e5 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -90,8 +90,13 @@ func NewApplication(cfg *Config) (*Application, error) { if err != nil { return nil, err } + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return nil, err + } + return &Application{ - logger: log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo), + logger: logger, state: state, snapshots: snapshots, cfg: cfg, @@ -155,7 +160,7 @@ func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { // FinalizeBlock implements ABCI. func (app *Application) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { - var txs = make([]*abci.ResponseDeliverTx, len(req.Txs)) + var txs = make([]*abci.ExecTxResult, len(req.Txs)) app.mu.Lock() defer app.mu.Unlock() @@ -167,16 +172,16 @@ func (app *Application) FinalizeBlock(req abci.RequestFinalizeBlock) abci.Respon } app.state.Set(key, value) - txs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK} + txs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK} } - valUpdates, err := app.validatorUpdates(uint64(req.Height)) + valUpdates, err := app.validatorUpdates(uint64(req.Header.Height)) if err != nil { panic(err) } return abci.ResponseFinalizeBlock{ - Txs: txs, + TxResults: txs, ValidatorUpdates: valUpdates, Events: []abci.Event{ { @@ -188,7 +193,7 @@ func (app *Application) FinalizeBlock(req abci.RequestFinalizeBlock) abci.Respon }, { Key: "height", - Value: strconv.Itoa(int(req.Height)), + Value: strconv.Itoa(int(req.Header.Height)), }, }, }, @@ -300,7 +305,8 @@ func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) a } func (app *Application) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { - return abci.ResponsePrepareProposal{BlockData: req.BlockData} + // None of the transactions are modified by this application. + return abci.ResponsePrepareProposal{ModifiedTx: false} } // ProcessProposal implements part of the Application interface.
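For contrast with the pass-through `PrepareProposal` above, here is a hedged sketch of an application that does edit the raw proposal via `TxRecord`s. The `abci.TxRecord` type and its actions appear in `types/tx.go` later in this changeset; the `isValid` predicate is a hypothetical application-level check, and the sketch only builds the record list:

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// prepareRecords classifies each raw-proposal transaction into a TxRecord.
// Transactions failing the application's own validity check are marked
// REMOVED, which drops them from both the proposal and the mempool; all
// others are passed through unmodified.
func prepareRecords(txs [][]byte, isValid func([]byte) bool) []*abci.TxRecord {
	records := make([]*abci.TxRecord, 0, len(txs))
	for _, tx := range txs {
		action := abci.TxRecord_UNMODIFIED
		if !isValid(tx) {
			action = abci.TxRecord_REMOVED
		}
		records = append(records, &abci.TxRecord{Action: action, Tx: tx})
	}
	return records
}
```

An application returning such a list would also set `modified_tx` to true in its `ResponsePrepareProposal`, per the usage rules above.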
diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 10b809279..bec78d89c 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -2,7 +2,9 @@ package main import ( + "context" "fmt" + stdlog "log" "math/rand" "os" "path/filepath" @@ -17,21 +19,35 @@ const ( randomSeed int64 = 4827085738 ) -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) - func main() { - NewCLI().Run() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cli, err := NewCLI() + if err != nil { + stdlog.Fatal(err) + } + + cli.Run(ctx) } // CLI is the Cobra-based command-line interface. type CLI struct { - root *cobra.Command - opts Options + root *cobra.Command + opts Options + logger log.Logger } // NewCLI sets up the CLI. -func NewCLI() *CLI { - cli := &CLI{} +func NewCLI() (*CLI, error) { + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return nil, err + } + + cli := &CLI{ + logger: logger, + } cli.root = &cobra.Command{ Use: "generator", Short: "End-to-end testnet generator", @@ -51,7 +67,7 @@ func NewCLI() *CLI { cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, "Maxmum network size (nodes), 0 is unlimited") - return cli + return cli, nil } // generate generates manifests in a directory. @@ -90,9 +106,9 @@ func (cli *CLI) generate() error { } // Run runs the CLI. -func (cli *CLI) Run() { - if err := cli.root.Execute(); err != nil { - logger.Error(err.Error()) +func (cli *CLI) Run(ctx context.Context) { + if err := cli.root.ExecuteContext(ctx); err != nil { + cli.logger.Error(err.Error()) os.Exit(1) } } diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 704cc06bb..54b9ef533 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -34,8 +34,6 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) - // main is the binary entrypoint. func main() { ctx, cancel := context.WithCancel(context.Background()) @@ -51,7 +49,6 @@ func main() { } if err := run(ctx, configFile); err != nil { - logger.Error(err.Error()) os.Exit(1) } } @@ -63,9 +60,20 @@ func run(ctx context.Context, configFile string) error { return err } + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + // Print the error directly because no logger is available yet; + // use the logger everywhere else. + fmt.Fprintln(os.Stderr, "ERROR:", err) + return err + } + // Start remote signer (must start before node if running builtin). if cfg.PrivValServer != "" { - if err = startSigner(ctx, cfg); err != nil { + if err = startSigner(ctx, logger, cfg); err != nil { + logger.Error("starting signer", + "server", cfg.PrivValServer, + "err", err) return err } if cfg.Protocol == "builtin" { @@ -76,11 +84,11 @@ func run(ctx context.Context, configFile string) error { // Start app server.
switch cfg.Protocol { case "socket", "grpc": - err = startApp(ctx, cfg) + err = startApp(ctx, logger, cfg) case "builtin": switch cfg.Mode { case string(e2e.ModeLight): - err = startLightNode(ctx, cfg) + err = startLightNode(ctx, logger, cfg) case string(e2e.ModeSeed): err = startSeedNode(ctx) default: @@ -90,6 +98,10 @@ func run(ctx context.Context, configFile string) error { err = fmt.Errorf("invalid protocol %q", cfg.Protocol) } if err != nil { + logger.Error("starting node", + "protocol", cfg.Protocol, + "mode", cfg.Mode, + "err", err) return err } @@ -100,7 +112,7 @@ func run(ctx context.Context, configFile string) error { } // startApp starts the application server, listening for connections from Tendermint. -func startApp(ctx context.Context, cfg *Config) error { +func startApp(ctx context.Context, logger log.Logger, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err @@ -136,7 +148,7 @@ func startNode(ctx context.Context, cfg *Config) error { ctx, tmcfg, nodeLogger, - abciclient.NewLocalCreator(app), + abciclient.NewLocalClient(nodeLogger, app), nil, ) if err != nil { @@ -160,7 +172,7 @@ func startSeedNode(ctx context.Context) error { return n.Start(ctx) } -func startLightNode(ctx context.Context, cfg *Config) error { +func startLightNode(ctx context.Context, logger log.Logger, cfg *Config) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return err @@ -218,7 +230,7 @@ func startLightNode(ctx context.Context, cfg *Config) error { } // startSigner starts a signer server connecting to the given endpoint. -func startSigner(ctx context.Context, cfg *Config) error { +func startSigner(ctx context.Context, logger log.Logger, cfg *Config) error { filePV, err := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState) if err != nil { return err diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index ba60d72cc..8be90f0c2 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -15,9 +15,9 @@ var getMp func() mempool.Mempool func init() { app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc(log.NewNopLogger()) - err := appConnMem.Start(context.TODO()) + logger := log.NewNopLogger() + conn := abciclient.NewLocalClient(logger, app) + err := conn.Start(context.TODO()) if err != nil { panic(err) } @@ -27,13 +27,7 @@ func init() { getMp = func() mempool.Mempool { if mp == nil { - mp = mempool.NewTxMempool( - log.NewNopLogger(), - cfg, - appConnMem, - 0, - ) - + mp = mempool.NewTxMempool(logger, cfg, conn) } return mp } diff --git a/third_party/proto/gogoproto/gogo.proto b/third_party/proto/gogoproto/gogo.proto deleted file mode 100644 index 31c516cd0..000000000 --- a/third_party/proto/gogoproto/gogo.proto +++ /dev/null @@ -1,147 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copied from https://github.com/gogo/protobuf/blob/master/gogoproto/gogo.proto -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - 
optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - - optional string castrepeated = 65013; -} diff --git a/tools/tools.go b/tools/tools.go index 0e61333ec..9fc291d99 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // This file uses the recommended method for tracking developer tools in a go module. diff --git a/types/events.go b/types/events.go index d20ecfa93..cd535e71b 100644 --- a/types/events.go +++ b/types/events.go @@ -270,12 +270,11 @@ const ( // see EventBus#PublishEventTx TxHeightKey = "tx.height" - // BlockHeightKey is a reserved key used for indexing BeginBlock and Endblock - // events. + // BlockHeightKey is a reserved key used for indexing FinalizeBlock events. BlockHeightKey = "block.height" - EventTypeBeginBlock = "begin_block" - EventTypeEndBlock = "end_block" + // EventTypeFinalizeBlock is a reserved key used for indexing FinalizeBlock events. + EventTypeFinalizeBlock = "finalize_block" ) var ( diff --git a/types/results.go b/types/results.go deleted file mode 100644 index 9181450bc..000000000 --- a/types/results.go +++ /dev/null @@ -1,54 +0,0 @@ -package types - -import ( - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/merkle" -) - -// ABCIResults wraps the deliver tx results to return a proof. -type ABCIResults []*abci.ResponseDeliverTx - -// NewResults strips non-deterministic fields from ResponseDeliverTx responses -// and returns ABCIResults. -func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults { - res := make(ABCIResults, len(responses)) - for i, d := range responses { - res[i] = deterministicResponseDeliverTx(d) - } - return res -} - -// Hash returns a merkle hash of all results. -func (a ABCIResults) Hash() []byte { - return merkle.HashFromByteSlices(a.toByteSlices()) -} - -// ProveResult returns a merkle proof of one result from the set -func (a ABCIResults) ProveResult(i int) merkle.Proof { - _, proofs := merkle.ProofsFromByteSlices(a.toByteSlices()) - return *proofs[i] -} - -func (a ABCIResults) toByteSlices() [][]byte { - l := len(a) - bzs := make([][]byte, l) - for i := 0; i < l; i++ { - bz, err := a[i].Marshal() - if err != nil { - panic(err) - } - bzs[i] = bz - } - return bzs -} - -// deterministicResponseDeliverTx strips non-deterministic fields from -// ResponseDeliverTx and returns another ResponseDeliverTx. 
-func deterministicResponseDeliverTx(response *abci.ResponseDeliverTx) *abci.ResponseDeliverTx { - return &abci.ResponseDeliverTx{ - Code: response.Code, - Data: response.Data, - GasWanted: response.GasWanted, - GasUsed: response.GasUsed, - } -} diff --git a/types/results_test.go b/types/results_test.go deleted file mode 100644 index 5b1be3466..000000000 --- a/types/results_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" -) - -func TestABCIResults(t *testing.T) { - a := &abci.ResponseDeliverTx{Code: 0, Data: nil} - b := &abci.ResponseDeliverTx{Code: 0, Data: []byte{}} - c := &abci.ResponseDeliverTx{Code: 0, Data: []byte("one")} - d := &abci.ResponseDeliverTx{Code: 14, Data: nil} - e := &abci.ResponseDeliverTx{Code: 14, Data: []byte("foo")} - f := &abci.ResponseDeliverTx{Code: 14, Data: []byte("bar")} - - // Nil and []byte{} should produce the same bytes - bzA, err := a.Marshal() - require.NoError(t, err) - bzB, err := b.Marshal() - require.NoError(t, err) - - require.Equal(t, bzA, bzB) - - // a and b should be the same, don't go in results. - results := ABCIResults{a, c, d, e, f} - - // Make sure each result serializes differently - last := []byte{} - assert.Equal(t, last, bzA) // first one is empty - for i, res := range results[1:] { - bz, err := res.Marshal() - require.NoError(t, err) - - assert.NotEqual(t, last, bz, "%d", i) - last = bz - } - - // Make sure that we can get a root hash from results and verify proofs. - root := results.Hash() - assert.NotEmpty(t, root) - - for i, res := range results { - bz, err := res.Marshal() - require.NoError(t, err) - - proof := results.ProveResult(i) - valid := proof.Verify(root, bz) - assert.NoError(t, valid, "%d", i) - } -} diff --git a/types/tx.go b/types/tx.go index 746252238..2dd7d3a51 100644 --- a/types/tx.go +++ b/types/tx.go @@ -5,7 +5,9 @@ import ( "crypto/sha256" "errors" "fmt" + "sort" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -32,13 +34,8 @@ type Txs []Tx // Hash returns the Merkle root hash of the transaction hashes. // i.e. the leaves of the tree are the hashes of the txs. func (txs Txs) Hash() []byte { - // These allocations will be removed once Txs is switched to [][]byte, - // ref #2603. This is because golang does not allow type casting slices without unsafe - txBzs := make([][]byte, len(txs)) - for i := 0; i < len(txs); i++ { - txBzs[i] = txs[i].Hash() - } - return merkle.HashFromByteSlices(txBzs) + hl := txs.hashList() + return merkle.HashFromByteSlices(hl) } // Index returns the index of this transaction in the list, or -1 if not found @@ -61,16 +58,9 @@ func (txs Txs) IndexByHash(hash []byte) int { return -1 } -// Proof returns a simple merkle proof for this node. -// Panics if i < 0 or i >= len(txs) -// TODO: optimize this! 
func (txs Txs) Proof(i int) TxProof { - l := len(txs) - bzs := make([][]byte, l) - for i := 0; i < l; i++ { - bzs[i] = txs[i].Hash() - } - root, proofs := merkle.ProofsFromByteSlices(bzs) + hl := txs.hashList() + root, proofs := merkle.ProofsFromByteSlices(hl) return TxProof{ RootHash: root, @@ -79,11 +69,23 @@ func (txs Txs) Proof(i int) TxProof { } } +func (txs Txs) hashList() [][]byte { + hl := make([][]byte, len(txs)) + for i := 0; i < len(txs); i++ { + hl[i] = txs[i].Hash() + } + return hl +} + +// Txs is a slice of transactions. Sorting a Txs value orders the transactions +// lexicographically. +func (txs Txs) Len() int { return len(txs) } +func (txs Txs) Swap(i, j int) { txs[i], txs[j] = txs[j], txs[i] } +func (txs Txs) Less(i, j int) bool { + return bytes.Compare(txs[i], txs[j]) == -1 +} + // ToSliceOfBytes converts a Txs to slice of byte slices. -// -// NOTE: This method should become obsolete once Txs is switched to [][]byte. -// ref: #2603 -// TODO This function is to disappear when TxRecord is introduced func (txs Txs) ToSliceOfBytes() [][]byte { txBzs := make([][]byte, len(txs)) for i := 0; i < len(txs); i++ { @@ -92,14 +94,182 @@ func (txs Txs) ToSliceOfBytes() [][]byte { return txBzs } -// ToTxs converts a raw slice of byte slices into a Txs type. -// TODO This function is to disappear when TxRecord is introduced -func ToTxs(txs [][]byte) Txs { - txBzs := make(Txs, len(txs)) - for i := 0; i < len(txs); i++ { - txBzs[i] = txs[i] +// TxRecordSet contains indexes into an underlying set of transactions. +// These indexes are useful for validating and working with a list of TxRecords +// from the PrepareProposal response. +// +// Only one copy of the original data is referenced by all of the indexes but a +// transaction may appear in multiple indexes. +type TxRecordSet struct { + // all holds the complete list of all transactions from the original list of + // TxRecords. + all Txs + + // included is an index of the transactions that will be included in the block + // and is constructed from the list of both added and unmodified transactions. + // included maintains the original order that the transactions were present + // in the list of TxRecords. + included Txs + + // added, unmodified, removed, and unknown are indexes for each of the actions + // that may be supplied with a transaction. + // + // Because each transaction only has one action, it can be referenced by + // at most 3 indexes in this data structure: the action-specific index, the + // included index, and the all index. + added Txs + unmodified Txs + removed Txs + unknown Txs +} + +// NewTxRecordSet constructs a new set from the given transaction records. +// The contents of the input transactions are shared by the set, and must not +// be modified during the lifetime of the set. +func NewTxRecordSet(trs []*abci.TxRecord) TxRecordSet { + txrSet := TxRecordSet{ + all: make([]Tx, len(trs)), } - return txBzs + for i, tr := range trs { + + txrSet.all[i] = Tx(tr.Tx) + + // The following set of assignments do not allocate new []byte, they create + // pointers to the already allocated slice. 
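+		// For example, a tx with action UNMODIFIED is appended to both the
+		// unmodified and included indexes below, but each append copies only a
+		// slice header that aliases the bytes already held by txrSet.all[i].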
+		switch tr.GetAction() {
+		case abci.TxRecord_UNKNOWN:
+			txrSet.unknown = append(txrSet.unknown, txrSet.all[i])
+		case abci.TxRecord_UNMODIFIED:
+			txrSet.unmodified = append(txrSet.unmodified, txrSet.all[i])
+			txrSet.included = append(txrSet.included, txrSet.all[i])
+		case abci.TxRecord_ADDED:
+			txrSet.added = append(txrSet.added, txrSet.all[i])
+			txrSet.included = append(txrSet.included, txrSet.all[i])
+		case abci.TxRecord_REMOVED:
+			txrSet.removed = append(txrSet.removed, txrSet.all[i])
+		}
+	}
+	return txrSet
+}
+
+// IncludedTxs returns the transactions marked for inclusion in a block. This
+// list maintains the order in which the transactions appeared in the list of
+// TxRecords used to construct the TxRecordSet.
+func (t TxRecordSet) IncludedTxs() []Tx {
+	return t.included
+}
+
+// AddedTxs returns the transactions added by the application.
+func (t TxRecordSet) AddedTxs() []Tx {
+	return t.added
+}
+
+// RemovedTxs returns the transactions marked for removal by the application.
+func (t TxRecordSet) RemovedTxs() []Tx {
+	return t.removed
+}
+
+// Validate checks that the record set was correctly constructed from the original
+// list of transactions.
+func (t TxRecordSet) Validate(maxSizeBytes int64, otxs Txs) error {
+	if len(t.unknown) > 0 {
+		return fmt.Errorf("%d transactions marked unknown (first unknown hash: %x)", len(t.unknown), t.unknown[0].Hash())
+	}
+
+	// The following validation logic performs a set of sorts on the data in the TxRecordSet indexes.
+	// It sorts the original transaction list, otxs, once.
+	// It sorts the new transaction list twice: once when sorting 'all', the total list,
+	// and once by sorting the set of the added, removed, and unmodified transaction indexes,
+	// which, when combined, comprise the complete list of modified transactions.
+	//
+	// Each of the added, removed, and unmodified indexes is then iterated once, and
+	// each value is checked against the sorted original list for containment.
+	// Asymptotically, this yields a total runtime of O(N*log(N) + 2*M*log(M) + M*log(N))
+	// in the input size of the original list, N, and the input size of the new list, M, respectively.
+	// Performance gains are likely possible, but this approach was preferred for readability
+	// and maintainability.
+
+	// Sort a copy of the complete transaction slice so we can check for
+	// duplication. The copy is so we do not change the original ordering.
+	// Only the slices are copied, the transaction contents are shared.
+	allCopy := sortedCopy(t.all)
+
+	var size int64
+	for i, cur := range allCopy {
+		size += int64(len(cur))
+		if size > maxSizeBytes {
+			return fmt.Errorf("transaction data size %d exceeds maximum %d", size, maxSizeBytes)
+		}
+
+		// allCopy is sorted, so any duplicated data will be adjacent.
+		if i+1 < len(allCopy) && bytes.Equal(cur, allCopy[i+1]) {
+			return fmt.Errorf("found duplicate transaction with hash: %x", cur.Hash())
+		}
+	}
+
+	// create copies of each of the action-specific indexes so that the order of
+	// the original indexes can be preserved.
+	addedCopy := sortedCopy(t.added)
+	removedCopy := sortedCopy(t.removed)
+	unmodifiedCopy := sortedCopy(t.unmodified)
+
+	// make a defensive copy of otxs so that the order of
+	// the caller's data is not altered.
+	otxsCopy := sortedCopy(otxs)
+
+	if ix, ok := containsAll(otxsCopy, unmodifiedCopy); !ok {
+		return fmt.Errorf("new transaction incorrectly marked as unmodified, transaction hash: %x", unmodifiedCopy[ix].Hash())
+	}
+
+	if ix, ok := containsAll(otxsCopy, removedCopy); !ok {
+		return fmt.Errorf("new transaction incorrectly marked as removed, transaction hash: %x", removedCopy[ix].Hash())
+	}
+	if ix, ok := containsAny(otxsCopy, addedCopy); ok {
+		return fmt.Errorf("existing transaction incorrectly marked as added, transaction hash: %x", addedCopy[ix].Hash())
+	}
+	return nil
+}
+
+func sortedCopy(txs Txs) Txs {
+	cp := make(Txs, len(txs))
+	copy(cp, txs)
+	sort.Sort(cp)
+	return cp
+}
+
+// containsAny checks whether list a contains any of the transactions in list
+// b. If a match is found, the index in b of the matching transaction is returned.
+// Both lists must be sorted.
+func containsAny(a, b []Tx) (int, bool) {
+	for i, cur := range b {
+		if _, ok := contains(a, cur); ok {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// containsAll checks that super contains all of the transactions in the sub
+// list. If not all values in sub are present in super, the index in sub of the
+// first Tx absent from super is returned.
+func containsAll(super, sub Txs) (int, bool) {
+	for i, cur := range sub {
+		if _, ok := contains(super, cur); !ok {
+			return i, false
+		}
+	}
+	return -1, true
+}
+
+// contains checks that the sorted list, set, contains elem. If set contains
+// elem, the index of elem in set is returned.
+func contains(set []Tx, elem Tx) (int, bool) {
+	n := sort.Search(len(set), func(i int) bool {
+		return bytes.Compare(elem, set[i]) <= 0
+	})
+	if n == len(set) || !bytes.Equal(elem, set[n]) {
+		return -1, false
+	}
+	return n, true
 }

 // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
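A minimal usage sketch of the TxRecordSet API introduced above. The wrapper function, its name, and its parameters are hypothetical; NewTxRecordSet, Validate, and IncludedTxs are the exported entry points added by this change.

package proposal

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

// buildBlockTxs validates the records an application returned from
// PrepareProposal against the transactions originally offered to it, and
// returns the ordered list of transactions to include in the proposed block.
func buildBlockTxs(records []*abci.TxRecord, original types.Txs, maxBytes int64) (types.Txs, error) {
	set := types.NewTxRecordSet(records)
	if err := set.Validate(maxBytes, original); err != nil {
		return nil, err
	}
	// IncludedTxs preserves the order of the input records, so the result
	// can be used directly as the block's transaction list.
	return set.IncludedTxs(), nil
}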
diff --git a/types/tx_test.go b/types/tx_test.go index e2f12772a..d8737e9f0 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -2,12 +2,13 @@ package types import ( "bytes" - mrand "math/rand" + "math/rand" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" ctest "github.com/tendermint/tendermint/internal/libs/test" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -21,11 +22,6 @@ func makeTxs(cnt, size int) Txs { return txs } -func randInt(low, high int) int { - off := mrand.Int() % (high - low) - return low + off -} - func TestTxIndex(t *testing.T) { for i := 0; i < 20; i++ { txs := makeTxs(15, 60) @@ -52,6 +48,160 @@ func TestTxIndexByHash(t *testing.T) { } } +func TestValidateTxRecordSet(t *testing.T) { + t.Run("should error on total transaction size exceeding max data size", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{6, 7, 8, 9, 10}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(9, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on duplicate transactions with the same action", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{200}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on duplicate transactions with mixed actions", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_REMOVED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{200}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on new transactions marked UNMODIFIED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_UNMODIFIED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on new transactions marked REMOVED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_REMOVED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on existing transaction marked as ADDED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{5, 4, 3, 2, 1}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{6}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{{0}, {1, 2, 3, 4, 5}}) + require.Error(t, err) + }) + t.Run("should error if any transaction marked as UNKNOWN", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_UNKNOWN, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + 
t.Run("TxRecordSet preserves order", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{99}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{55}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{12}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{66}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{9}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{17}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.NoError(t, err) + for i, tx := range txrSet.IncludedTxs() { + require.Equal(t, Tx(trs[i].Tx), tx) + } + }) +} + func TestValidTxProof(t *testing.T) { cases := []struct { txs Txs @@ -150,3 +300,7 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { } } } + +func randInt(low, high int) int { + return rand.Intn(high-low) + low +} diff --git a/types/validation.go b/types/validation.go index e8f53f2a0..8655bdabd 100644 --- a/types/validation.go +++ b/types/validation.go @@ -15,11 +15,13 @@ func shouldBatchVerify(vals *ValidatorSet, commit *Commit) bool { return len(commit.Signatures) >= batchVerifyThreshold && batch.SupportsBatchVerifier(vals.GetProposer().PubKey) } +// TODO(wbanfield): determine if the following comment is still true regarding Gaia. + // VerifyCommit verifies +2/3 of the set had signed the given commit. // // It checks all the signatures! While it's safe to exit as soon as we have // 2/3+ signatures, doing so would impact incentivization logic in the ABCI -// application that depends on the LastCommitInfo sent in BeginBlock, which +// application that depends on the LastCommitInfo sent in FinalizeBlock, which // includes which validators signed. For instance, Gaia incentivizes proposers // with a bonus for including more than +2/3 of the signatures. func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID, diff --git a/types/vote_set.go b/types/vote_set.go index bb675e110..438d089b3 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -227,6 +227,9 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok } func (voteSet *VoteSet) GetVotes() []*Vote { + if voteSet == nil { + return nil + } return voteSet.votes } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 8baa74172..4de9b1837 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -493,7 +493,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // Ensure that Commit is good. if err := commit.ValidateBasic(); err != nil { - t.Errorf("error in Commit.ValidateBasic(): %w", err) + t.Errorf("error in Commit.ValidateBasic(): %v", err) } }
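For reference, the containment checks in Validate rely on the standard sort.Search binary-search pattern over a lexicographically sorted slice. A standalone sketch of that pattern, illustrative only and not part of the diff:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// contains reports whether the sorted set holds elem, mirroring the helper in
// types/tx.go: sort.Search finds the first index whose element compares >= elem,
// and an equality check confirms the match.
func contains(set [][]byte, elem []byte) bool {
	n := sort.Search(len(set), func(i int) bool {
		return bytes.Compare(elem, set[i]) <= 0
	})
	return n < len(set) && bytes.Equal(elem, set[n])
}

func main() {
	set := [][]byte{{9}, {1}, {2, 3}}
	// The set must be sorted before contains is called.
	sort.Slice(set, func(i, j int) bool { return bytes.Compare(set[i], set[j]) < 0 })
	fmt.Println(contains(set, []byte{2, 3})) // true
	fmt.Println(contains(set, []byte{7}))    // false
}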