From e81b0e290ec5972015885e46bd1c7a1ab31bc51c Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 11 Feb 2022 14:31:57 +0100 Subject: [PATCH] spec: merge spec repo into tendermint repo (#7804) --- .github/workflows/action.yml | 17 - .github/workflows/e2e-manual.yml | 36 + .github/workflows/e2e-nightly-34x.yml | 17 - .github/workflows/e2e-nightly-35x.yml | 1 - .github/workflows/e2e-nightly-master.yml | 1 - .github/workflows/lint.yml | 2 +- .github/workflows/linter.yml | 2 +- .github/workflows/markdown-links.yml | 18 + .gitignore | 9 +- .markdownlint.yml | 4 +- CHANGELOG_PENDING.md | 2 +- Makefile | 2 +- abci/client/client.go | 5 +- abci/client/grpc_client.go | 48 +- abci/client/local_client.go | 58 +- abci/client/mocks/client.go | 115 +- abci/client/socket_client.go | 59 +- abci/client/socket_client_test.go | 6 +- abci/cmd/abci-cli/abci-cli.go | 59 +- abci/example/example_test.go | 92 +- abci/example/kvstore/kvstore.go | 17 +- abci/example/kvstore/kvstore_test.go | 59 +- abci/example/kvstore/persistent_kvstore.go | 46 +- abci/server/socket_server.go | 12 +- abci/tests/server/client.go | 32 +- abci/types/application.go | 51 +- abci/types/messages.go | 48 +- abci/types/types.pb.go | 3714 +++++++---------- cmd/tendermint/commands/completion.go | 46 + cmd/tendermint/commands/key_migrate.go | 1 + cmd/tendermint/commands/reindex_event.go | 9 +- cmd/tendermint/commands/reindex_event_test.go | 11 +- cmd/tendermint/commands/replay.go | 1 + cmd/tendermint/commands/rollback_test.go | 16 +- cmd/tendermint/commands/root_test.go | 33 +- cmd/tendermint/commands/run_node.go | 2 +- cmd/tendermint/commands/show_node_id.go | 1 + cmd/tendermint/main.go | 4 +- config/toml.go | 8 +- config/toml_test.go | 6 +- crypto/ed25519/bench_test.go | 1 + crypto/secp256k1/secp256k1.go | 67 +- crypto/secp256k1/secp256k1_nocgo.go | 76 - crypto/sr25519/bench_test.go | 1 + .../adr-071-proposer-based-timestamps.md | 6 +- .../architecture/adr-077-block-retention.md | 5 +- .../architecture/adr-078-nonzero-genesis.md | 3 +- .../adr-079-ed25519-verification.md | 5 +- .../architecture/adr-080-reverse-sync.md | 3 +- .../architecture/img}/block-retention.png | Bin docs/pre.sh | 2 +- docs/rfc/README.md | 5 +- {rfc => docs/rfc}/images/abci++.png | Bin {rfc => docs/rfc}/images/abci.png | Bin docs/rfc/rfc-011-abci++.md | 257 ++ docs/rfc/rfc-011-delete-gas.md | 162 + docs/rfc/rfc-012-semantic-versioning.md | 98 + .../rfc/rfc-013-abci++.md | 5 +- .../rfc/rfc-014-semantic-versioning.md | 3 +- docs/roadmap/roadmap.md | 4 +- docs/tendermint-core/block-structure.md | 2 +- docs/tendermint-core/consensus/README.md | 2 +- docs/tendermint-core/subscription.md | 2 +- go.mod | 3 +- go.sum | 6 +- internal/blocksync/pool_test.go | 14 +- internal/blocksync/reactor_test.go | 8 +- internal/consensus/README.md | 3 - internal/consensus/byzantine_test.go | 6 +- internal/consensus/common_test.go | 27 +- internal/consensus/invalid_test.go | 1 + internal/consensus/mempool_test.go | 32 +- internal/consensus/metrics.go | 1 + internal/consensus/mocks/cons_sync_reactor.go | 1 + internal/consensus/mocks/fast_sync_reactor.go | 1 + internal/consensus/reactor_test.go | 2 +- internal/consensus/replay_stubs.go | 11 +- internal/consensus/replay_test.go | 45 +- internal/consensus/state.go | 4 +- .../consensus/types/height_vote_set_test.go | 28 +- .../consensus/types/peer_round_state_test.go | 1 + internal/consensus/wal_generator.go | 2 +- internal/eventbus/event_bus.go | 4 +- internal/eventbus/event_bus_test.go | 42 +- internal/evidence/doc.go | 2 +- 
internal/evidence/mocks/block_store.go | 1 + internal/inspect/inspect_test.go | 15 +- internal/libs/autofile/autofile_test.go | 12 +- internal/libs/autofile/group_test.go | 7 +- internal/libs/sync/closer.go | 31 - internal/libs/sync/closer_test.go | 28 - internal/libs/tempfile/tempfile_test.go | 2 +- internal/mempool/ids_test.go | 1 + internal/mempool/mempool_bench_test.go | 11 +- internal/mempool/mempool_test.go | 2 +- internal/mempool/reactor_test.go | 3 +- internal/mempool/tx_test.go | 1 + internal/p2p/channel.go | 1 + internal/p2p/metrics_test.go | 1 + internal/p2p/p2ptest/util.go | 1 + internal/p2p/pqueue.go | 48 +- internal/p2p/pqueue_test.go | 1 + internal/p2p/queue.go | 30 +- internal/p2p/router_filter_test.go | 4 +- internal/p2p/router_init_test.go | 1 + internal/p2p/router_test.go | 16 +- internal/p2p/transport_mconn.go | 32 +- internal/p2p/transport_mconn_test.go | 3 - internal/p2p/transport_memory.go | 45 +- internal/p2p/transport_memory_test.go | 1 + internal/proxy/app_conn.go | 31 +- internal/proxy/app_conn_test.go | 1 + internal/proxy/mocks/app_conn_consensus.go | 86 +- internal/pubsub/pubsub.go | 3 + internal/pubsub/pubsub_test.go | 1 + internal/pubsub/subscription.go | 1 + internal/rpc/core/blocks.go | 11 +- internal/rpc/core/blocks_test.go | 21 +- internal/rpc/core/env.go | 1 + internal/state/execution.go | 92 +- internal/state/helpers_test.go | 40 +- internal/state/indexer/block/kv/kv.go | 9 +- internal/state/indexer/block/kv/kv_test.go | 37 +- internal/state/indexer/block/kv/util.go | 1 + .../state/indexer/indexer_service_test.go | 3 + internal/state/indexer/mocks/event_sink.go | 1 + internal/state/indexer/sink/kv/kv_test.go | 49 +- internal/state/indexer/sink/null/null_test.go | 1 + internal/state/indexer/sink/psql/psql.go | 8 +- internal/state/indexer/sink/psql/psql_test.go | 20 +- internal/state/indexer/tx/kv/kv_bench_test.go | 6 +- internal/state/indexer/tx/kv/kv_test.go | 5 +- internal/state/mocks/event_sink.go | 1 + internal/state/mocks/evidence_pool.go | 1 + internal/state/mocks/store.go | 1 + internal/state/state.go | 1 + internal/state/state_test.go | 112 +- internal/state/store.go | 6 +- internal/state/store_test.go | 26 +- internal/state/test/factory/block.go | 1 + internal/state/time_test.go | 1 + internal/statesync/chunks_test.go | 6 +- internal/statesync/mocks/state_provider.go | 1 + internal/statesync/syncer_test.go | 6 +- internal/store/store_test.go | 20 +- internal/test/factory/block.go | 1 + internal/test/factory/p2p.go | 1 + internal/test/factory/validator.go | 1 + libs/cli/helper.go | 114 +- libs/cli/setup.go | 98 +- libs/cli/setup_test.go | 99 +- libs/events/event_cache.go | 39 - libs/events/event_cache_test.go | 49 - libs/log/default_test.go | 1 + libs/os/os_test.go | 15 +- libs/service/service.go | 133 +- libs/service/service_test.go | 131 +- libs/strings/string.go | 54 +- libs/strings/string_test.go | 64 +- light/client.go | 2 +- light/doc.go | 4 +- light/example_test.go | 36 +- light/helpers_test.go | 1 + light/light_test.go | 22 +- light/provider/http/http_test.go | 2 +- light/rpc/client.go | 18 +- node/node.go | 4 +- node/node_test.go | 36 +- node/seed.go | 2 +- privval/file_test.go | 24 +- privval/grpc/client_test.go | 1 + privval/grpc/util.go | 7 +- privval/socket_dialers_test.go | 2 +- privval/socket_listeners_test.go | 12 +- proto/README.md | 20 +- proto/tendermint/abci/types.proto | 108 +- proto/tendermint/consensus/wal.proto | 2 - proto/tendermint/privval/service.proto | 2 +- proto/tendermint/state/types.pb.go | 249 +- 
proto/tendermint/state/types.proto | 4 +- proto/tendermint/types/canonical.proto | 3 - proto/tendermint/types/events.proto | 2 - proto/tendermint/types/params.proto | 2 +- rfc/README.md | 30 - rfc/rfc_template.md | 39 - rpc/client/examples_test.go | 5 +- rpc/client/main_test.go | 8 +- rpc/client/mock/abci.go | 7 +- rpc/coretypes/responses.go | 3 +- rpc/coretypes/responses_test.go | 1 + rpc/jsonrpc/client/ws_client.go | 28 +- rpc/jsonrpc/client/ws_client_test.go | 8 - rpc/test/helpers.go | 5 +- rust-spec/fastsync/README.md | 3 - rust-spec/lightclient/README.md | 3 - rust-spec/lightclient/verification/README.md | 3 - spec/README.md | 4 + spec/abci++/README.md | 2 +- spec/blockchain/blockchain.md | 3 - spec/blockchain/encoding.md | 3 - spec/blockchain/readme.md | 14 - spec/blockchain/state.md | 3 - .../proposer-based-timestamp/README.md | 2 +- .../pbts-algorithm_002_draft.md | 2 +- {ivy-proofs => spec/ivy-proofs}/Dockerfile | 0 {ivy-proofs => spec/ivy-proofs}/README.md | 0 .../ivy-proofs}/abstract_tendermint.ivy | 0 .../ivy-proofs}/accountable_safety_1.ivy | 0 .../ivy-proofs}/accountable_safety_2.ivy | 0 .../ivy-proofs}/check_proofs.sh | 0 .../ivy-proofs}/classic_safety.ivy | 0 .../ivy-proofs}/count_lines.sh | 0 .../ivy-proofs}/docker-compose.yml | 1 - .../ivy-proofs}/domain_model.ivy | 0 .../ivy-proofs}/network_shim.ivy | 0 .../ivy-proofs}/output/.gitignore | 0 .../ivy-proofs}/tendermint.ivy | 0 .../ivy-proofs}/tendermint_test.ivy | 0 .../supervisor/supervisor_002_draft.md | 4 +- spec/p2p/messages/consensus.md | 2 +- test/e2e/app/app.go | 25 +- test/e2e/generator/generate_test.go | 1 + test/fuzz/mempool/fuzz_test.go | 1 + test/fuzz/p2p/secretconnection/fuzz_test.go | 1 + test/fuzz/rpc/jsonrpc/server/fuzz_test.go | 1 + types/block.go | 5 +- types/events.go | 8 +- types/genesis_test.go | 2 +- types/node_info.go | 17 +- types/node_info_test.go | 22 +- types/params.go | 2 +- types/proposal.go | 2 +- types/validator_test.go | 1 + types/vote.go | 16 + types/vote_set.go | 4 + 235 files changed, 3703 insertions(+), 4485 deletions(-) delete mode 100644 .github/workflows/action.yml create mode 100644 .github/workflows/e2e-manual.yml create mode 100644 .github/workflows/markdown-links.yml create mode 100644 cmd/tendermint/commands/completion.go delete mode 100644 crypto/secp256k1/secp256k1_nocgo.go rename rfc/001-block-retention.md => docs/architecture/adr-077-block-retention.md (96%) rename rfc/002-nonzero-genesis.md => docs/architecture/adr-078-nonzero-genesis.md (96%) rename rfc/003-ed25519-verification.md => docs/architecture/adr-079-ed25519-verification.md (94%) rename rfc/005-reverse-sync.md => docs/architecture/adr-080-reverse-sync.md (97%) rename {rfc/images => docs/architecture/img}/block-retention.png (100%) rename {rfc => docs/rfc}/images/abci++.png (100%) rename {rfc => docs/rfc}/images/abci.png (100%) create mode 100644 docs/rfc/rfc-011-abci++.md create mode 100644 docs/rfc/rfc-011-delete-gas.md create mode 100644 docs/rfc/rfc-012-semantic-versioning.md rename rfc/004-abci++.md => docs/rfc/rfc-013-abci++.md (99%) rename rfc/006-semantic-versioning.md => docs/rfc/rfc-014-semantic-versioning.md (98%) delete mode 100644 internal/consensus/README.md delete mode 100644 internal/libs/sync/closer.go delete mode 100644 internal/libs/sync/closer_test.go delete mode 100644 libs/events/event_cache.go delete mode 100644 libs/events/event_cache_test.go delete mode 100644 rfc/README.md delete mode 100644 rfc/rfc_template.md delete mode 100644 rust-spec/fastsync/README.md delete mode 100644 
rust-spec/lightclient/README.md delete mode 100644 rust-spec/lightclient/verification/README.md delete mode 100644 spec/blockchain/blockchain.md delete mode 100644 spec/blockchain/encoding.md delete mode 100644 spec/blockchain/readme.md delete mode 100644 spec/blockchain/state.md rename {ivy-proofs => spec/ivy-proofs}/Dockerfile (100%) rename {ivy-proofs => spec/ivy-proofs}/README.md (100%) rename {ivy-proofs => spec/ivy-proofs}/abstract_tendermint.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/accountable_safety_1.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/accountable_safety_2.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/check_proofs.sh (100%) rename {ivy-proofs => spec/ivy-proofs}/classic_safety.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/count_lines.sh (100%) rename {ivy-proofs => spec/ivy-proofs}/docker-compose.yml (99%) rename {ivy-proofs => spec/ivy-proofs}/domain_model.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/network_shim.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/output/.gitignore (100%) rename {ivy-proofs => spec/ivy-proofs}/tendermint.ivy (100%) rename {ivy-proofs => spec/ivy-proofs}/tendermint_test.ivy (100%) diff --git a/.github/workflows/action.yml b/.github/workflows/action.yml deleted file mode 100644 index 3cb9cb31b..000000000 --- a/.github/workflows/action.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Check Markdown links - -on: - push: - branches: - - master - pull_request: - branches: [master] - -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 - with: - check-modified-files-only: 'yes' diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml new file mode 100644 index 000000000..1e5f6c36c --- /dev/null +++ b/.github/workflows/e2e-manual.yml @@ -0,0 +1,36 @@ +# Manually runs randomly generated E2E testnets +# (triggered via workflow_dispatch) +name: e2e-manual +on: + workflow_dispatch: + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/checkout@v2.4.0 + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel.
+ run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly/ + + - name: Run ${{ matrix.group }} p2p testnets + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 2a3f0015f..38cb3a9d4 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -6,7 +6,6 @@ name: e2e-nightly-34x on: - workflow_dispatch: # allow running workflow manually, in theory schedule: - cron: '0 2 * * *' @@ -58,19 +57,3 @@ jobs: SLACK_COLOR: danger SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x SLACK_FOOTER: '' - - e2e-nightly-success: # may turn this off once they seem to pass consistently - needs: e2e-nightly-test - if: ${{ success() }} - runs-on: ubuntu-latest - steps: - - name: Notify Slack on success - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - SLACK_CHANNEL: tendermint-internal - SLACK_USERNAME: Nightly E2E Tests - SLACK_ICON_EMOJI: ':white_check_mark:' - SLACK_COLOR: good - SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x - SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index 13e3f8020..425108169 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -5,7 +5,6 @@ name: e2e-nightly-35x on: - workflow_dispatch: # allow running workflow manually schedule: - cron: '0 2 * * *' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 3602cd832..bc4feae45 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -5,7 +5,6 @@ name: e2e-nightly-master on: - workflow_dispatch: # allow running workflow manually schedule: - cron: '0 2 * * *' diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 12bf3b553..40b00eba1 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,4 +1,4 @@ -name: Lint +name: Golang Linter # Lint runs golangci-lint over the entire Tendermint repository # This workflow is run on every pull request and push to master # The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 628b1af69..d430485ab 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -1,4 +1,4 @@ -name: Lint +name: Markdown Linter on: push: branches: diff --git a/.github/workflows/markdown-links.yml b/.github/workflows/markdown-links.yml new file mode 100644 index 000000000..0c0e7525a --- /dev/null +++ b/.github/workflows/markdown-links.yml @@ -0,0 +1,18 @@ +# Currently disabled until all links have been fixed +# name: Check Markdown links + +# on: +# push: +# branches: +# - master +# pull_request: +# branches: [master] + +# jobs: +# markdown-link-check: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@master +# - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 +# with: +# check-modified-files-only: 'yes' diff --git a/.gitignore b/.gitignore index f745afdf2..1e3cbce3c 100644 --- a/.gitignore +++ b/.gitignore @@ -47,13 +47,7 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip -proto/tendermint/blocksync/types.proto -proto/tendermint/consensus/types.proto -proto/tendermint/mempool/*.proto -proto/tendermint/p2p/*.proto -proto/tendermint/statesync/*.proto -proto/tendermint/types/*.proto -proto/tendermint/version/*.proto +proto/spec/**/*.pb.go *.aux *.bbl *.blg @@ -61,4 +55,3 @@ proto/tendermint/version/*.proto *.pdf *.gz *.dvi -*.pb.go diff --git a/.markdownlint.yml b/.markdownlint.yml index baa78a116..80e3be4ed 100644 --- a/.markdownlint.yml +++ b/.markdownlint.yml @@ -1,8 +1,8 @@ default: true MD001: false -MD007: { indent: 4 } +MD007: {indent: 4} MD013: false -MD024: { siblings_only: true } +MD024: {siblings_only: true} MD025: false MD033: false MD036: false diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0e0638d64..9d7c5691e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -20,7 +20,7 @@ Special thanks to external contributors on this release: - Apps - - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository. + - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). - P2P Protocol diff --git a/Makefile b/Makefile index 42a0d45d7..0fd996c67 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,7 @@ $(BUILDDIR)/: # there and run the Build & Push Proto Builder Image workflow. 
IMAGE := ghcr.io/tendermint/docker-build-proto:latest DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE) -HTTPS_GIT := https://github.com/tendermint/spec.git +HTTPS_GIT := https://github.com/tendermint/tendermint.git ############################################################################### ### Protobuf ### diff --git a/abci/client/client.go b/abci/client/client.go index 47a14d9fc..e535aa028 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -33,14 +33,12 @@ type Client interface { // Asynchronous requests FlushAsync(context.Context) (*ReqRes, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error) CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error) // Synchronous requests Flush(context.Context) error Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error) Commit(context.Context) (*types.ResponseCommit, error) @@ -49,8 +47,7 @@ type Client interface { ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) - BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 716be6d6a..c4a92aabf 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -193,16 +193,6 @@ func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) { return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}}) } -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { - req := types.ToRequestDeliverTx(params) - res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) -} - // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) { req := types.ToRequestCheckTx(params) @@ -271,18 +261,6 @@ func (cli *grpcClient) Info( return cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true)) } -func (cli *grpcClient) DeliverTx( - ctx context.Context, - params types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { - - reqres, err := cli.DeliverTxAsync(ctx, params) - if err != nil { - return nil, err - } - return 
cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error() -} - func (cli *grpcClient) CheckTx( ctx context.Context, params types.RequestCheckTx, @@ -317,24 +295,6 @@ func (cli *grpcClient) InitChain( return cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true)) } -func (cli *grpcClient) BeginBlock( - ctx context.Context, - params types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { - - req := types.ToRequestBeginBlock(params) - return cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true)) -} - -func (cli *grpcClient) EndBlock( - ctx context.Context, - params types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { - - req := types.ToRequestEndBlock(params) - return cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true)) -} - func (cli *grpcClient) ListSnapshots( ctx context.Context, params types.RequestListSnapshots, @@ -400,3 +360,11 @@ func (cli *grpcClient) VerifyVoteExtension( req := types.ToRequestVerifyVoteExtension(params) return cli.client.VerifyVoteExtension(ctx, req.GetVerifyVoteExtension(), grpc.WaitForReady(true)) } + +func (cli *grpcClient) FinalizeBlock( + ctx context.Context, + params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + + req := types.ToRequestFinalizeBlock(params) + return cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true)) +} diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 25d8ed90e..d233b7222 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -58,17 +58,6 @@ func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) { return newLocalReqRes(types.ToRequestFlush(), nil), nil } -func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.DeliverTx(params) - return app.callback( - types.ToRequestDeliverTx(params), - types.ToResponseDeliverTx(res), - ), nil -} - func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -98,18 +87,6 @@ func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types return &res, nil } -func (app *localClient) DeliverTx( - ctx context.Context, - req types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { - - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.DeliverTx(req) - return &res, nil -} - func (app *localClient) CheckTx( ctx context.Context, req types.RequestCheckTx, @@ -152,30 +129,6 @@ func (app *localClient) InitChain( return &res, nil } -func (app *localClient) BeginBlock( - ctx context.Context, - req types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { - - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.BeginBlock(req) - return &res, nil -} - -func (app *localClient) EndBlock( - ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { - - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.EndBlock(req) - return &res, nil -} - func (app *localClient) ListSnapshots( ctx context.Context, req types.RequestListSnapshots, @@ -266,6 +219,17 @@ func (app *localClient) VerifyVoteExtension( return &res, nil } +func (app *localClient) FinalizeBlock( + ctx context.Context, + req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.FinalizeBlock(req) + 
return &res, nil +} + //------------------------------------------------------- func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index dac3c43b4..66f22c3ec 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -40,29 +40,6 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply return r0, r1 } -// BeginBlock provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseBeginBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CheckTx provides a mock function with given fields: _a0, _a1 func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) @@ -132,52 +109,6 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) { return r0, r1 } -// DeliverTx provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponseDeliverTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseDeliverTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // Echo provides a mock function with given fields: ctx, msg func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { ret := _m.Called(ctx, msg) @@ -201,29 +132,6 @@ func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, er return r0, r1 } -// EndBlock provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEndBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { - r1 = rf(_a0, _a1) - 
} else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // Error provides a mock function with given fields: func (_m *Client) Error() error { ret := _m.Called() @@ -261,6 +169,29 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) ( return r0, r1 } +// FinalizeBlock provides a mock function with given fields: _a0, _a1 +func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseFinalizeBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Flush provides a mock function with given fields: _a0 func (_m *Client) Flush(_a0 context.Context) error { ret := _m.Called(_a0) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index fa0fcf97f..a8e873af3 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -226,10 +226,6 @@ func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) { return cli.queueRequestAsync(ctx, types.ToRequestFlush()) } -func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req)) -} - func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) { return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req)) } @@ -280,18 +276,6 @@ func (cli *socketClient) Info( return reqres.Response.GetInfo(), nil } -func (cli *socketClient) DeliverTx( - ctx context.Context, - req types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { - - reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestDeliverTx(req)) - if err != nil { - return nil, err - } - return reqres.Response.GetDeliverTx(), nil -} - func (cli *socketClient) CheckTx( ctx context.Context, req types.RequestCheckTx, @@ -334,30 +318,6 @@ func (cli *socketClient) InitChain( return reqres.Response.GetInitChain(), nil } -func (cli *socketClient) BeginBlock( - ctx context.Context, - req types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { - - reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestBeginBlock(req)) - if err != nil { - return nil, err - } - return reqres.Response.GetBeginBlock(), nil -} - -func (cli *socketClient) EndBlock( - ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { - - reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEndBlock(req)) - if err != nil { - return nil, err - } - return reqres.Response.GetEndBlock(), nil -} - func (cli *socketClient) ListSnapshots( ctx context.Context, req types.RequestListSnapshots, @@ -449,6 +409,17 @@ func (cli *socketClient) VerifyVoteExtension( return reqres.Response.GetVerifyVoteExtension(), nil } +func (cli *socketClient) FinalizeBlock( + ctx context.Context, + req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestFinalizeBlock(req)) + if err != nil { + return nil, err + } + return reqres.Response.GetFinalizeBlock(), nil +} + //---------------------------------------- // queueRequest enqueues req onto 
the queue. If the queue is full, it ether @@ -550,8 +521,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_Flush) case *types.Request_Info: _, ok = res.Value.(*types.Response_Info) - case *types.Request_DeliverTx: - _, ok = res.Value.(*types.Response_DeliverTx) case *types.Request_CheckTx: _, ok = res.Value.(*types.Response_CheckTx) case *types.Request_Commit: @@ -566,10 +535,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_ExtendVote) case *types.Request_VerifyVoteExtension: _, ok = res.Value.(*types.Response_VerifyVoteExtension) - case *types.Request_BeginBlock: - _, ok = res.Value.(*types.Response_BeginBlock) - case *types.Request_EndBlock: - _, ok = res.Value.(*types.Response_EndBlock) case *types.Request_ApplySnapshotChunk: _, ok = res.Value.(*types.Response_ApplySnapshotChunk) case *types.Request_LoadSnapshotChunk: @@ -578,6 +543,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_ListSnapshots) case *types.Request_OfferSnapshot: _, ok = res.Value.(*types.Response_OfferSnapshot) + case *types.Request_FinalizeBlock: + _, ok = res.Value.(*types.Response_FinalizeBlock) } return ok } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 556f98566..9afcce739 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -29,7 +29,7 @@ func TestProperSyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { - rsp, err := c.BeginBlock(ctx, types.RequestBeginBlock{}) + rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{}) assert.NoError(t, err) assert.NoError(t, c.Flush(ctx)) assert.NotNil(t, rsp) @@ -79,7 +79,7 @@ type slowApp struct { types.BaseApplication } -func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { +func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock { time.Sleep(200 * time.Millisecond) - return types.ResponseBeginBlock{} + return types.ResponseFinalizeBlock{} } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index cffbadfb7..5fea32b4e 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -193,7 +193,7 @@ var deliverTxCmd = &cobra.Command{ Short: "deliver a new transaction to the application", Long: "deliver a new transaction to the application", Args: cobra.ExactArgs(1), - RunE: cmdDeliverTx, + RunE: cmdFinalizeBlock, } var checkTxCmd = &cobra.Command{ @@ -300,17 +300,38 @@ func cmdTest(cmd *cobra.Command, args []string) error { []func() error{ func() error { return servertest.InitChain(ctx, client) }, func() error { return servertest.Commit(ctx, client, nil) }, - func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) }, + func() error { + return servertest.FinalizeBlock(ctx, client, [][]byte{ + []byte("abc"), + }, []uint32{ + code.CodeTypeBadNonce, + }, nil) + }, func() error { return servertest.Commit(ctx, client, nil) }, - func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) }, + func() error { + return servertest.FinalizeBlock(ctx, client, [][]byte{ + {0x00}, + }, []uint32{ + code.CodeTypeOK, + }, nil) + }, func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, - func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, - func() error { 
return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, func() error { - return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) + return servertest.FinalizeBlock(ctx, client, [][]byte{ + {0x00}, + {0x01}, + {0x00, 0x02}, + {0x00, 0x03}, + {0x00, 0x00, 0x04}, + {0x00, 0x00, 0x06}, + }, []uint32{ + code.CodeTypeBadNonce, + code.CodeTypeOK, + code.CodeTypeOK, + code.CodeTypeOK, + code.CodeTypeOK, + code.CodeTypeBadNonce, + }, nil) }, func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, }) @@ -406,7 +427,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { case "commit": return cmdCommit(cmd, actualArgs) case "deliver_tx": - return cmdDeliverTx(cmd, actualArgs) + return cmdFinalizeBlock(cmd, actualArgs) case "echo": return cmdEcho(cmd, actualArgs) case "info": @@ -475,7 +496,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad uint32 = 10 // Append a new tx to application -func cmdDeliverTx(cmd *cobra.Command, args []string) error { +func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ Code: codeBad, @@ -487,16 +508,18 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTx(cmd.Context(), types.RequestDeliverTx{Tx: txBytes}) + res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) if err != nil { return err } - printResponse(cmd, args, response{ - Code: res.Code, - Data: res.Data, - Info: res.Info, - Log: res.Log, - }) + for _, tx := range res.Txs { + printResponse(cmd, args, response{ + Code: tx.Code, + Data: tx.Data, + Info: tx.Info, + Log: tx.Log, + }) + } return nil } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 99c7cc35c..e10feb76c 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -6,7 +6,6 @@ import ( "math/rand" "net" "os" - "reflect" "testing" "time" @@ -35,7 +34,7 @@ func TestKVStore(t *testing.T) { logger := log.NewTestingLogger(t) logger.Info("### Testing KVStore") - testStream(ctx, t, logger, kvstore.NewApplication()) + testBulk(ctx, t, logger, kvstore.NewApplication()) } func TestBaseApp(t *testing.T) { @@ -44,7 +43,7 @@ func TestBaseApp(t *testing.T) { logger := log.NewTestingLogger(t) logger.Info("### Testing BaseApp") - testStream(ctx, t, logger, types.NewBaseApplication()) + testBulk(ctx, t, logger, types.NewBaseApplication()) } func TestGRPC(t *testing.T) { @@ -57,10 +56,10 @@ func TestGRPC(t *testing.T) { testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication())) } -func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) { +func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) { t.Helper() - const numDeliverTxs = 20000 + const numDeliverTxs = 700000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) @@ -77,51 +76,22 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types. 
err = client.Start(ctx) require.NoError(t, err) - done := make(chan struct{}) - counter := 0 - client.SetResponseCallback(func(req *types.Request, res *types.Response) { - // Process response - switch r := res.Value.(type) { - case *types.Response_DeliverTx: - counter++ - if r.DeliverTx.Code != code.CodeTypeOK { - t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code) - } - if counter > numDeliverTxs { - t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs) - } - if counter == numDeliverTxs { - go func() { - time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow - close(done) - }() - return - } - case *types.Response_Flush: - // ignore - default: - t.Error("Unexpected response type", reflect.TypeOf(res.Value)) - } - }) - - // Write requests + // Construct request + rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)} for counter := 0; counter < numDeliverTxs; counter++ { - // Send request - _, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")}) - require.NoError(t, err) - - // Sometimes send flush messages - if counter%128 == 0 { - err = client.Flush(ctx) - require.NoError(t, err) - } + rfb.Txs[counter] = []byte("test") + } + // Send bulk request + res, err := client.FinalizeBlock(ctx, rfb) + require.NoError(t, err) + require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match") + for _, tx := range res.Txs { + require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed") } // Send final flush message - _, err = client.FlushAsync(ctx) + err = client.Flush(ctx) require.NoError(t, err) - - <-done } //------------------------- @@ -133,7 +103,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) { t.Helper() - numDeliverTxs := 2000 + numDeliverTxs := 680000 socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) @@ -142,7 +112,7 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) require.NoError(t, server.Start(ctx)) - t.Cleanup(func() { server.Wait() }) + t.Cleanup(server.Wait) // Connect to the socket conn, err := grpc.Dial(socket, @@ -159,25 +129,17 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type client := types.NewABCIApplicationClient(conn) - // Write requests + // Construct request + rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)} for counter := 0; counter < numDeliverTxs; counter++ { - // Send request - response, err := client.DeliverTx(ctx, &types.RequestDeliverTx{Tx: []byte("test")}) - require.NoError(t, err, "Error in GRPC DeliverTx") - - counter++ - if response.Code != code.CodeTypeOK { - t.Error("DeliverTx failed with ret_code", response.Code) - } - if counter > numDeliverTxs { - t.Fatal("Too many DeliverTx responses") - } - t.Log("response", counter) - if counter == numDeliverTxs { - go func() { - time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow - }() - } + rfb.Txs[counter] = []byte("test") + } + // Send request + response, err := client.FinalizeBlock(ctx, &rfb) + require.NoError(t, err, "Error in GRPC FinalizeBlock") + require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match") + for _, tx := range response.Txs { + require.Equal(t, 
tx.Code, code.CodeTypeOK, "Tx failed") } } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index b6cbce1d9..9f75fd149 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -86,14 +86,13 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) } // tx is either "key=value" or just arbitrary bytes -func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { +func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx { var key, value string - - parts := bytes.Split(req.Tx, []byte("=")) + parts := bytes.Split(tx, []byte("=")) if len(parts) == 2 { key, value = string(parts[0]), string(parts[1]) } else { - key, value = string(req.Tx), string(req.Tx) + key, value = string(tx), string(tx) } err := app.state.db.Set(prefixKey([]byte(key)), []byte(value)) @@ -114,7 +113,15 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli }, } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} + return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} +} + +func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock { + txs := make([]*types.ResponseDeliverTx, len(req.Txs)) + for i, tx := range req.Txs { + txs[i] = app.HandleTx(tx) + } + return types.ResponseFinalizeBlock{Txs: txs} } func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 0c104f6d7..21f54e0fe 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -3,7 +3,6 @@ package kvstore import ( "context" "fmt" - "os" "sort" "testing" @@ -25,12 +24,14 @@ const ( ) func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { - req := types.RequestDeliverTx{Tx: tx} - ar := app.DeliverTx(req) - require.False(t, ar.IsErr(), ar) + req := types.RequestFinalizeBlock{Txs: [][]byte{tx}} + ar := app.FinalizeBlock(req) + require.Equal(t, 1, len(ar.Txs)) + require.False(t, ar.Txs[0].IsErr()) // repeating tx doesn't raise error - ar = app.DeliverTx(req) - require.False(t, ar.IsErr(), ar) + ar = app.FinalizeBlock(req) + require.Equal(t, 1, len(ar.Txs)) + require.False(t, ar.Txs[0].IsErr()) // commit app.Commit() @@ -72,10 +73,7 @@ func TestKVStoreKV(t *testing.T) { } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO - if err != nil { - t.Fatal(err) - } + dir := t.TempDir() logger := log.NewTestingLogger(t) kvstore := NewPersistentKVStoreApplication(logger, dir) @@ -90,10 +88,7 @@ func TestPersistentKVStoreKV(t *testing.T) { } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO - if err != nil { - t.Fatal(err) - } + dir := t.TempDir() logger := log.NewTestingLogger(t) kvstore := NewPersistentKVStoreApplication(logger, dir) @@ -111,8 +106,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { header := tmproto.Header{ Height: height, } - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) - kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) + kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height}) kvstore.Commit() resInfo = kvstore.Info(types.RequestInfo{}) @@ -124,10 +118,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { // add a validator, remove a validator, update a validator 
func TestValUpdates(t *testing.T) { - dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO - if err != nil { - t.Fatal(err) - } + dir := t.TempDir() logger := log.NewTestingLogger(t) kvstore := NewPersistentKVStoreApplication(logger, dir) @@ -204,16 +195,16 @@ func makeApplyBlock( Height: height, } - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) - for _, tx := range txs { - if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() { - t.Fatal(r) - } - } - resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) + resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{ + Hash: hash, + Header: header, + Height: height, + Txs: txs, + }) + kvstore.Commit() - valsEqual(t, diff, resEndBlock.ValidatorUpdates) + valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates) } @@ -330,13 +321,15 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) } func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { - ar, err := app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx}) + ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.False(t, ar.IsErr(), ar) - // repeating tx doesn't raise error - ar, err = app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx}) + require.Equal(t, 1, len(ar.Txs)) + require.False(t, ar.Txs[0].IsErr()) + // repeating FinalizeBlock doesn't raise error + ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.False(t, ar.IsErr(), ar) + require.Equal(t, 1, len(ar.Txs)) + require.False(t, ar.Txs[0].IsErr()) // commit _, err = app.Commit(ctx) require.NoError(t, err) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index be46665c8..830f93235 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -64,21 +64,21 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo } // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes -func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) HandleTx(tx []byte) *types.ResponseDeliverTx { // if it starts with "val:", update the validator set // format is "val:pubkey!power" - if isValidatorTx(req.Tx) { + if isValidatorTx(tx) { // update validators in the merkle tree // and in app.ValUpdates - return app.execValidatorTx(req.Tx) + return app.execValidatorTx(tx) } - if isPrepareTx(req.Tx) { - return app.execPrepareTx(req.Tx) + if isPrepareTx(tx) { + return app.execPrepareTx(tx) } // otherwise, update the key-value store - return app.app.DeliverTx(req) + return app.app.HandleTx(tx) } func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { @@ -121,7 +121,9 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t } // Track the block hash and header information -func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { +// Execute transactions +// Update the validator set +func (app *PersistentKVStoreApplication) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock { // reset valset changes app.ValUpdates = make([]types.ValidatorUpdate, 0) @@ -143,12 +145,12 @@ func (app *PersistentKVStoreApplication) BeginBlock(req 
types.RequestBeginBlock) } } - return types.ResponseBeginBlock{} -} + respTxs := make([]*types.ResponseDeliverTx, len(req.Txs)) + for i, tx := range req.Txs { + respTxs[i] = app.HandleTx(tx) + } -// Update the validator set -func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { - return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} + return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates} } func (app *PersistentKVStoreApplication) ListSnapshots( @@ -238,13 +240,13 @@ func isValidatorTx(tx []byte) bool { // format is "val:pubkey!power" // pubkey is a base64-encoded 32-byte ed25519 key -func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) *types.ResponseDeliverTx { tx = tx[len(ValidatorSetChangePrefix):] // get the pubkey and power pubKeyAndPower := strings.Split(string(tx), "!") if len(pubKeyAndPower) != 2 { - return types.ResponseDeliverTx{ + return &types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)} } @@ -253,7 +255,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // decode the pubkey pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { - return types.ResponseDeliverTx{ + return &types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)} } @@ -261,7 +263,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // decode the power power, err := strconv.ParseInt(powerS, 10, 64) if err != nil { - return types.ResponseDeliverTx{ + return &types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Power (%s) is not an int", powerS)} } @@ -271,7 +273,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon } // add, update, or remove a validator -func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx { pubkey, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) @@ -286,7 +288,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate } if !hasKey { pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes()) - return types.ResponseDeliverTx{ + return &types.ResponseDeliverTx{ Code: code.CodeTypeUnauthorized, Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)} } @@ -298,7 +300,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate // add or update validator value := bytes.NewBuffer(make([]byte, 0)) if err := types.WriteMessage(&v, value); err != nil { - return types.ResponseDeliverTx{ + return &types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("error encoding validator: %v", err)} } @@ -311,7 +313,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate // we only update the changes array if we successfully updated the tree app.ValUpdates = append(app.ValUpdates, v) - return types.ResponseDeliverTx{Code: code.CodeTypeOK} + return &types.ResponseDeliverTx{Code: code.CodeTypeOK} } // ----------------------------- @@ -324,9 +326,9 @@ func isPrepareTx(tx []byte) bool { // execPrepareTx is noop. 
tx data is considered as placeholder // and is substitute at the PrepareProposal. -func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) *types.ResponseDeliverTx { // noop - return types.ResponseDeliverTx{} + return &types.ResponseDeliverTx{} } // substPrepareTx subst all the preparetx in the blockdata diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index a548755c3..36ff4ef45 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -213,9 +213,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_Info: res := s.app.Info(*r.Info) responses <- types.ToResponseInfo(res) - case *types.Request_DeliverTx: - res := s.app.DeliverTx(*r.DeliverTx) - responses <- types.ToResponseDeliverTx(res) case *types.Request_CheckTx: res := s.app.CheckTx(*r.CheckTx) responses <- types.ToResponseCheckTx(res) @@ -228,12 +225,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_InitChain: res := s.app.InitChain(*r.InitChain) responses <- types.ToResponseInitChain(res) - case *types.Request_BeginBlock: - res := s.app.BeginBlock(*r.BeginBlock) - responses <- types.ToResponseBeginBlock(res) - case *types.Request_EndBlock: - res := s.app.EndBlock(*r.EndBlock) - responses <- types.ToResponseEndBlock(res) case *types.Request_ListSnapshots: res := s.app.ListSnapshots(*r.ListSnapshots) responses <- types.ToResponseListSnapshots(res) @@ -258,6 +249,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_VerifyVoteExtension: res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension) responses <- types.ToResponseVerifyVoteExtension(res) + case *types.Request_FinalizeBlock: + res := s.app.FinalizeBlock(*r.FinalizeBlock) + responses <- types.ToResponseFinalizeBlock(res) default: responses <- types.ToResponseException("Unknown request") } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 2b2d57961..4bdaf5b0e 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -49,22 +49,24 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error return nil } -func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTx(ctx, types.RequestDeliverTx{Tx: txBytes}) - code, data, log := res.Code, res.Data, res.Log - if code != codeExp { - fmt.Println("Failed test: DeliverTx") - fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n", - code, codeExp, log) - return errors.New("deliverTx error") - } - if !bytes.Equal(data, dataExp) { - fmt.Println("Failed test: DeliverTx") - fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n", - data, dataExp) - return errors.New("deliverTx error") +func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error { + res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes}) + for i, tx := range res.Txs { + code, data, log := tx.Code, tx.Data, tx.Log + if code != codeExp[i] { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. 
Log: %v\n", + code, codeExp, log) + return errors.New("FinalizeBlock error") + } + if !bytes.Equal(data, dataExp) { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("FinalizeBlock response data was unexpected. Got %X expected %X\n", + data, dataExp) + return errors.New("FinalizeBlock error") + } } - fmt.Println("Passed test: DeliverTx") + fmt.Println("Passed test: FinalizeBlock") return nil } diff --git a/abci/types/application.go b/abci/types/application.go index 98848bb1e..16ae03546 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -20,18 +20,14 @@ type Application interface { InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal ProcessProposal(RequestProcessProposal) ResponseProcessProposal - // Signals the beginning of a block - BeginBlock(RequestBeginBlock) ResponseBeginBlock - // Deliver a tx for full processing - DeliverTx(RequestDeliverTx) ResponseDeliverTx - // Signals the end of a block, returns changes to the validator set - EndBlock(RequestEndBlock) ResponseEndBlock // Commit the state and return the application Merkle root hash Commit() ResponseCommit // Create application specific vote extension ExtendVote(RequestExtendVote) ResponseExtendVote // Verify application's vote extension data VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension + // Deliver the decided block with its txs to the Application + FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock // State Sync Connection ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots @@ -56,10 +52,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo { return ResponseInfo{} } -func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx { - return ResponseDeliverTx{Code: CodeTypeOK} -} - func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx { return ResponseCheckTx{Code: CodeTypeOK} } @@ -86,14 +78,6 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain { return ResponseInitChain{} } -func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock { - return ResponseBeginBlock{} -} - -func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock { - return ResponseEndBlock{} -} - func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots { return ResponseListSnapshots{} } @@ -118,6 +102,16 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce return ResponseProcessProposal{} } +func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock { + txs := make([]*ResponseDeliverTx, len(req.Txs)) + for i := range req.Txs { + txs[i] = &ResponseDeliverTx{Code: CodeTypeOK} + } + return ResponseFinalizeBlock{ + Txs: txs, + } +} + //------------------------------------------------------- // GRPCApplication is a GRPC wrapper for Application @@ -142,11 +136,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon return &res, nil } -func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - res := app.app.DeliverTx(*req) - return &res, nil -} - func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { res := app.app.CheckTx(*req) return &res, nil @@ -167,16 +156,6 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain 
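// Aside (not part of this patch): a minimal sketch of an application written
// against the consolidated Application interface above. CounterApp, its
// txCount field and the main function are hypothetical names used only for
// illustration. The app embeds types.BaseApplication to inherit the default
// method implementations and overrides FinalizeBlock, which now covers the
// work previously split across BeginBlock, DeliverTx and EndBlock.
package main

import (
    "fmt"

    abci "github.com/tendermint/tendermint/abci/types"
)

type CounterApp struct {
    abci.BaseApplication
    txCount int64
}

func (app *CounterApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
    respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
    for i := range req.Txs {
        app.txCount++ // per-tx state transition, formerly done in DeliverTx
        respTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
    }
    // Validator and consensus-parameter updates, formerly returned from
    // EndBlock, travel in this same response (left empty here).
    return abci.ResponseFinalizeBlock{Txs: respTxs}
}

func main() {
    app := &CounterApp{}
    res := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{[]byte("a"), []byte("b")}})
    fmt.Println(len(res.Txs), app.txCount) // 2 2
}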
return &res, nil } -func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { - res := app.app.BeginBlock(*req) - return &res, nil -} - -func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { - res := app.app.EndBlock(*req) - return &res, nil -} - func (app *GRPCApplication) ListSnapshots( ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { res := app.app.ListSnapshots(*req) @@ -224,3 +203,9 @@ func (app *GRPCApplication) ProcessProposal( res := app.app.ProcessProposal(*req) return &res, nil } + +func (app *GRPCApplication) FinalizeBlock( + ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + res := app.app.FinalizeBlock(*req) + return &res, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go index 90cbfcc22..74d9b9d1a 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -4,6 +4,7 @@ import ( "io" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/internal/libs/protoio" ) @@ -44,12 +45,6 @@ func ToRequestInfo(req RequestInfo) *Request { } } -func ToRequestDeliverTx(req RequestDeliverTx) *Request { - return &Request{ - Value: &Request_DeliverTx{&req}, - } -} - func ToRequestCheckTx(req RequestCheckTx) *Request { return &Request{ Value: &Request_CheckTx{&req}, @@ -74,18 +69,6 @@ func ToRequestInitChain(req RequestInitChain) *Request { } } -func ToRequestBeginBlock(req RequestBeginBlock) *Request { - return &Request{ - Value: &Request_BeginBlock{&req}, - } -} - -func ToRequestEndBlock(req RequestEndBlock) *Request { - return &Request{ - Value: &Request_EndBlock{&req}, - } -} - func ToRequestListSnapshots(req RequestListSnapshots) *Request { return &Request{ Value: &Request_ListSnapshots{&req}, @@ -134,6 +117,12 @@ func ToRequestProcessProposal(req RequestProcessProposal) *Request { } } +func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request { + return &Request{ + Value: &Request_FinalizeBlock{&req}, + } +} + //---------------------------------------- func ToResponseException(errStr string) *Response { @@ -159,11 +148,6 @@ func ToResponseInfo(res ResponseInfo) *Response { Value: &Response_Info{&res}, } } -func ToResponseDeliverTx(res ResponseDeliverTx) *Response { - return &Response{ - Value: &Response_DeliverTx{&res}, - } -} func ToResponseCheckTx(res ResponseCheckTx) *Response { return &Response{ @@ -189,18 +173,6 @@ func ToResponseInitChain(res ResponseInitChain) *Response { } } -func ToResponseBeginBlock(res ResponseBeginBlock) *Response { - return &Response{ - Value: &Response_BeginBlock{&res}, - } -} - -func ToResponseEndBlock(res ResponseEndBlock) *Response { - return &Response{ - Value: &Response_EndBlock{&res}, - } -} - func ToResponseListSnapshots(res ResponseListSnapshots) *Response { return &Response{ Value: &Response_ListSnapshots{&res}, @@ -248,3 +220,9 @@ func ToResponseProcessProposal(res ResponseProcessProposal) *Response { Value: &Response_ProcessProposal{&res}, } } + +func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response { + return &Response{ + Value: &Response_FinalizeBlock{&res}, + } +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 25f814ba6..5f98a6ce5 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -120,7 +120,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32, 0} 
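// Aside (not part of this patch): with the DeliverTx/BeginBlock/EndBlock
// request and response wrappers removed above, a block is exercised in a
// single round trip. checkBlock is a hypothetical helper sketched against the
// abciclient.Client interface, mirroring the updated test helper earlier in
// this patch; it assumes the client has already been created and started.
package example

import (
    "context"
    "fmt"

    abciclient "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/types"
)

func checkBlock(ctx context.Context, client abciclient.Client, txs [][]byte) error {
    res, err := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txs})
    if err != nil {
        return err
    }
    for i, tx := range res.Txs {
        // every transaction in the block gets its own ResponseDeliverTx entry
        if tx.Code != types.CodeTypeOK {
            return fmt.Errorf("tx %d rejected with code %d: %s", i, tx.Code, tx.Log)
        }
    }
    return nil
}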
+ return fileDescriptor_252557cfdd89a31a, []int{28, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -157,7 +157,7 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34, 0} + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type ResponseVerifyVoteExtension_Result int32 @@ -188,7 +188,7 @@ func (x ResponseVerifyVoteExtension_Result) String() string { } func (ResponseVerifyVoteExtension_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37, 0} + return fileDescriptor_252557cfdd89a31a, []int{33, 0} } type ResponseProcessProposal_Result int32 @@ -216,7 +216,7 @@ func (x ResponseProcessProposal_Result) String() string { } func (ResponseProcessProposal_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38, 0} + return fileDescriptor_252557cfdd89a31a, []int{34, 0} } type Request struct { @@ -226,10 +226,7 @@ type Request struct { // *Request_Info // *Request_InitChain // *Request_Query - // *Request_BeginBlock // *Request_CheckTx - // *Request_DeliverTx - // *Request_EndBlock // *Request_Commit // *Request_ListSnapshots // *Request_OfferSnapshot @@ -239,6 +236,7 @@ type Request struct { // *Request_ProcessProposal // *Request_ExtendVote // *Request_VerifyVoteExtension + // *Request_FinalizeBlock Value isRequest_Value `protobuf_oneof:"value"` } @@ -296,18 +294,9 @@ type Request_InitChain struct { type Request_Query struct { Query *RequestQuery `protobuf:"bytes,5,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,6,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Request_CheckTx struct { CheckTx *RequestCheckTx `protobuf:"bytes,7,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,8,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,9,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Request_Commit struct { Commit *RequestCommit `protobuf:"bytes,10,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -335,16 +324,16 @@ type Request_ExtendVote struct { type Request_VerifyVoteExtension struct { VerifyVoteExtension *RequestVerifyVoteExtension `protobuf:"bytes,18,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` } +type Request_FinalizeBlock struct { + FinalizeBlock *RequestFinalizeBlock `protobuf:"bytes,19,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} func (*Request_Echo) isRequest_Value() {} func (*Request_Flush) isRequest_Value() {} func (*Request_Info) isRequest_Value() {} func (*Request_InitChain) isRequest_Value() {} func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} func (*Request_Commit) isRequest_Value() {} func (*Request_ListSnapshots) isRequest_Value() {} func (*Request_OfferSnapshot) isRequest_Value() {} @@ -354,6 +343,7 @@ func (*Request_PrepareProposal) isRequest_Value() {} func (*Request_ProcessProposal) 
isRequest_Value() {} func (*Request_ExtendVote) isRequest_Value() {} func (*Request_VerifyVoteExtension) isRequest_Value() {} +func (*Request_FinalizeBlock) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -397,13 +387,6 @@ func (m *Request) GetQuery() *RequestQuery { return nil } -func (m *Request) GetBeginBlock() *RequestBeginBlock { - if x, ok := m.GetValue().(*Request_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Request) GetCheckTx() *RequestCheckTx { if x, ok := m.GetValue().(*Request_CheckTx); ok { return x.CheckTx @@ -411,20 +394,6 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } -func (m *Request) GetDeliverTx() *RequestDeliverTx { - if x, ok := m.GetValue().(*Request_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -func (m *Request) GetEndBlock() *RequestEndBlock { - if x, ok := m.GetValue().(*Request_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Request) GetCommit() *RequestCommit { if x, ok := m.GetValue().(*Request_Commit); ok { return x.Commit @@ -488,6 +457,13 @@ func (m *Request) GetVerifyVoteExtension() *RequestVerifyVoteExtension { return nil } +func (m *Request) GetFinalizeBlock() *RequestFinalizeBlock { + if x, ok := m.GetValue().(*Request_FinalizeBlock); ok { + return x.FinalizeBlock + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Request) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -496,10 +472,7 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_Info)(nil), (*Request_InitChain)(nil), (*Request_Query)(nil), - (*Request_BeginBlock)(nil), (*Request_CheckTx)(nil), - (*Request_DeliverTx)(nil), - (*Request_EndBlock)(nil), (*Request_Commit)(nil), (*Request_ListSnapshots)(nil), (*Request_OfferSnapshot)(nil), @@ -509,6 +482,7 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_ProcessProposal)(nil), (*Request_ExtendVote)(nil), (*Request_VerifyVoteExtension)(nil), + (*Request_FinalizeBlock)(nil), } } @@ -812,74 +786,6 @@ func (m *RequestQuery) GetProve() bool { return false } -type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` -} - -func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } -func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } -func (*RequestBeginBlock) ProtoMessage() {} -func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{6} -} -func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) -} -func (m *RequestBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestBeginBlock) XXX_DiscardUnknown() { - 
xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo - -func (m *RequestBeginBlock) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *RequestBeginBlock) GetHeader() types1.Header { - if m != nil { - return m.Header - } - return types1.Header{} -} - -func (m *RequestBeginBlock) GetLastCommitInfo() LastCommitInfo { - if m != nil { - return m.LastCommitInfo - } - return LastCommitInfo{} -} - -func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { - if m != nil { - return m.ByzantineValidators - } - return nil -} - type RequestCheckTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` @@ -889,7 +795,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} + return fileDescriptor_252557cfdd89a31a, []int{6} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -932,94 +838,6 @@ func (m *RequestCheckTx) GetType() CheckTxType { return CheckTxType_New } -type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } -func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } -func (*RequestDeliverTx) ProtoMessage() {} -func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} -} -func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) -} -func (m *RequestDeliverTx) XXX_Size() int { - return m.Size() -} -func (m *RequestDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo - -func (m *RequestDeliverTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } -func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } -func (*RequestEndBlock) ProtoMessage() {} -func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} -} -func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) 
-} -func (m *RequestEndBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo - -func (m *RequestEndBlock) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - type RequestCommit struct { } @@ -1027,7 +845,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{10} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1064,7 +882,7 @@ func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } func (*RequestListSnapshots) ProtoMessage() {} func (*RequestListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{11} + return fileDescriptor_252557cfdd89a31a, []int{8} } func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1103,7 +921,7 @@ func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*RequestOfferSnapshot) ProtoMessage() {} func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{12} + return fileDescriptor_252557cfdd89a31a, []int{9} } func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1157,7 +975,7 @@ func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChu func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestLoadSnapshotChunk) ProtoMessage() {} func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{13} + return fileDescriptor_252557cfdd89a31a, []int{10} } func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1036,7 @@ func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotC func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestApplySnapshotChunk) ProtoMessage() {} func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{14} + return fileDescriptor_252557cfdd89a31a, []int{11} } func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1275,13 +1093,16 @@ type RequestPrepareProposal struct { BlockData [][]byte `protobuf:"bytes,1,rep,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` // If an application decides to populate block_data with extra information, they can not exceed this value. BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` + // votes includes all votes from the previous block. This contains vote extension data that can be used in proposal + // preparation. The votes here will then form the last commit that gets sent in the proposed block. 
+ Votes []*types1.Vote `protobuf:"bytes,3,rep,name=votes,proto3" json:"votes,omitempty"` } func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } func (*RequestPrepareProposal) ProtoMessage() {} func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} + return fileDescriptor_252557cfdd89a31a, []int{12} } func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1324,6 +1145,13 @@ func (m *RequestPrepareProposal) GetBlockDataSize() int64 { return 0 } +func (m *RequestPrepareProposal) GetVotes() []*types1.Vote { + if m != nil { + return m.Votes + } + return nil +} + // Extends a vote with application-side injection type RequestExtendVote struct { Vote *types1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` @@ -1333,7 +1161,7 @@ func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } func (*RequestExtendVote) ProtoMessage() {} func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{13} } func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1378,7 +1206,7 @@ func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExt func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*RequestVerifyVoteExtension) ProtoMessage() {} func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{14} } func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1423,7 +1251,7 @@ func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } func (*RequestProcessProposal) ProtoMessage() {} func (*RequestProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1466,6 +1294,90 @@ func (m *RequestProcessProposal) GetTxs() [][]byte { return nil } +type RequestFinalizeBlock struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Header types1.Header `protobuf:"bytes,4,opt,name=header,proto3" json:"header"` + LastCommitInfo LastCommitInfo `protobuf:"bytes,5,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Evidence `protobuf:"bytes,6,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` +} + +func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } +func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*RequestFinalizeBlock) ProtoMessage() {} +func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{16} +} +func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestFinalizeBlock) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestFinalizeBlock.Merge(m, src) +} +func (m *RequestFinalizeBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestFinalizeBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestFinalizeBlock proto.InternalMessageInfo + +func (m *RequestFinalizeBlock) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestFinalizeBlock) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestFinalizeBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestFinalizeBlock) GetHeader() types1.Header { + if m != nil { + return m.Header + } + return types1.Header{} +} + +func (m *RequestFinalizeBlock) GetLastCommitInfo() LastCommitInfo { + if m != nil { + return m.LastCommitInfo + } + return LastCommitInfo{} +} + +func (m *RequestFinalizeBlock) GetByzantineValidators() []Evidence { + if m != nil { + return m.ByzantineValidators + } + return nil +} + type Response struct { // Types that are valid to be assigned to Value: // *Response_Exception @@ -1474,10 +1386,7 @@ type Response struct { // *Response_Info // *Response_InitChain // *Response_Query - // *Response_BeginBlock // *Response_CheckTx - // *Response_DeliverTx - // *Response_EndBlock // *Response_Commit // *Response_ListSnapshots // *Response_OfferSnapshot @@ -1487,6 +1396,7 @@ type Response struct { // *Response_ProcessProposal // *Response_ExtendVote // *Response_VerifyVoteExtension + // *Response_FinalizeBlock Value isResponse_Value `protobuf_oneof:"value"` } @@ -1494,7 +1404,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1547,18 +1457,9 @@ type Response_InitChain struct { type Response_Query struct { Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Response_CheckTx struct { CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Response_Commit struct { Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -1586,6 +1487,9 @@ type Response_ExtendVote struct { type Response_VerifyVoteExtension struct { VerifyVoteExtension *ResponseVerifyVoteExtension 
`protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` } +type Response_FinalizeBlock struct { + FinalizeBlock *ResponseFinalizeBlock `protobuf:"bytes,20,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} func (*Response_Exception) isResponse_Value() {} func (*Response_Echo) isResponse_Value() {} @@ -1593,10 +1497,7 @@ func (*Response_Flush) isResponse_Value() {} func (*Response_Info) isResponse_Value() {} func (*Response_InitChain) isResponse_Value() {} func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} func (*Response_Commit) isResponse_Value() {} func (*Response_ListSnapshots) isResponse_Value() {} func (*Response_OfferSnapshot) isResponse_Value() {} @@ -1606,6 +1507,7 @@ func (*Response_PrepareProposal) isResponse_Value() {} func (*Response_ProcessProposal) isResponse_Value() {} func (*Response_ExtendVote) isResponse_Value() {} func (*Response_VerifyVoteExtension) isResponse_Value() {} +func (*Response_FinalizeBlock) isResponse_Value() {} func (m *Response) GetValue() isResponse_Value { if m != nil { @@ -1656,13 +1558,6 @@ func (m *Response) GetQuery() *ResponseQuery { return nil } -func (m *Response) GetBeginBlock() *ResponseBeginBlock { - if x, ok := m.GetValue().(*Response_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Response) GetCheckTx() *ResponseCheckTx { if x, ok := m.GetValue().(*Response_CheckTx); ok { return x.CheckTx @@ -1670,20 +1565,6 @@ func (m *Response) GetCheckTx() *ResponseCheckTx { return nil } -func (m *Response) GetDeliverTx() *ResponseDeliverTx { - if x, ok := m.GetValue().(*Response_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -func (m *Response) GetEndBlock() *ResponseEndBlock { - if x, ok := m.GetValue().(*Response_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Response) GetCommit() *ResponseCommit { if x, ok := m.GetValue().(*Response_Commit); ok { return x.Commit @@ -1747,6 +1628,13 @@ func (m *Response) GetVerifyVoteExtension() *ResponseVerifyVoteExtension { return nil } +func (m *Response) GetFinalizeBlock() *ResponseFinalizeBlock { + if x, ok := m.GetValue().(*Response_FinalizeBlock); ok { + return x.FinalizeBlock + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Response) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1756,10 +1644,7 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_Info)(nil), (*Response_InitChain)(nil), (*Response_Query)(nil), - (*Response_BeginBlock)(nil), (*Response_CheckTx)(nil), - (*Response_DeliverTx)(nil), - (*Response_EndBlock)(nil), (*Response_Commit)(nil), (*Response_ListSnapshots)(nil), (*Response_OfferSnapshot)(nil), @@ -1769,6 +1654,7 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_ProcessProposal)(nil), (*Response_ExtendVote)(nil), (*Response_VerifyVoteExtension)(nil), + (*Response_FinalizeBlock)(nil), } } @@ -1781,7 +1667,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1825,7 +1711,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1868,7 +1754,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1910,7 +1796,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +1870,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2051,7 +1937,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2143,71 +2029,27 @@ func (m *ResponseQuery) GetCodespace() string { return "" } -type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseBeginBlock) Reset() { *m = 
ResponseBeginBlock{} } -func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseBeginBlock) ProtoMessage() {} -func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} -} -func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) -} -func (m *ResponseBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo - -func (m *ResponseBeginBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - -type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // mempool_error is set by Tendermint. - // ABCI applications creating a ResponseCheckTX should not set mempool_error. - MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` + // mempool_error is set by Tendermint. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
+ MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2328,7 +2170,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2413,66 +2255,6 @@ func (m *ResponseDeliverTx) GetCodespace() string { return "" } -type ResponseEndBlock struct { - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } -func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseEndBlock) ProtoMessage() {} -func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} -} -func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) -} -func (m *ResponseEndBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo - -func (m *ResponseEndBlock) GetValidatorUpdates() []ValidatorUpdate { - if m != nil { - return m.ValidatorUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { - if m != nil { - return m.ConsensusParamUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - type ResponseCommit struct { // reserve 1 Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2483,7 +2265,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} + return fileDescriptor_252557cfdd89a31a, []int{26} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2534,7 
+2316,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2578,7 +2360,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} + return fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2622,7 +2404,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2668,7 +2450,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2726,7 +2508,7 @@ func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } func (*ResponsePrepareProposal) ProtoMessage() {} func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2770,7 +2552,7 @@ func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } func (*ResponseExtendVote) ProtoMessage() {} func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2814,7 +2596,7 @@ func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteE func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*ResponseVerifyVoteExtension) ProtoMessage() {} func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2859,7 +2641,7 @@ func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal func (m *ResponseProcessProposal) String() 
string { return proto.CompactTextString(m) } func (*ResponseProcessProposal) ProtoMessage() {} func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2902,6 +2684,74 @@ func (m *ResponseProcessProposal) GetEvidence() [][]byte { return nil } +type ResponseFinalizeBlock struct { + Txs []*ResponseDeliverTx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,2,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } +func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseFinalizeBlock) ProtoMessage() {} +func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{35} +} +func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) +} +func (m *ResponseFinalizeBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo + +func (m *ResponseFinalizeBlock) GetTxs() []*ResponseDeliverTx { + if m != nil { + return m.Txs + } + return nil +} + +func (m *ResponseFinalizeBlock) GetValidatorUpdates() []ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + type LastCommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -2911,7 +2761,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2966,7 +2816,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_252557cfdd89a31a, 
[]int{37} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +2870,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3084,7 +2934,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3152,7 +3002,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3205,7 +3055,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_252557cfdd89a31a, []int{41} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3258,7 +3108,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3319,7 +3169,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3395,7 +3245,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3472,10 +3322,7 @@ func init() { proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), 
"tendermint.abci.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") @@ -3485,6 +3332,7 @@ func init() { proto.RegisterType((*RequestExtendVote)(nil), "tendermint.abci.RequestExtendVote") proto.RegisterType((*RequestVerifyVoteExtension)(nil), "tendermint.abci.RequestVerifyVoteExtension") proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") + proto.RegisterType((*RequestFinalizeBlock)(nil), "tendermint.abci.RequestFinalizeBlock") proto.RegisterType((*Response)(nil), "tendermint.abci.Response") proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") @@ -3492,10 +3340,8 @@ func init() { proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") @@ -3505,6 +3351,7 @@ func init() { proto.RegisterType((*ResponseExtendVote)(nil), "tendermint.abci.ResponseExtendVote") proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "tendermint.abci.ResponseVerifyVoteExtension") proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") + proto.RegisterType((*ResponseFinalizeBlock)(nil), "tendermint.abci.ResponseFinalizeBlock") proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") proto.RegisterType((*Event)(nil), "tendermint.abci.Event") proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") @@ -3519,197 +3366,192 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3029 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0x23, 0xd5, - 0xb5, 0xd7, 0xa7, 0x2d, 0x1d, 0xeb, 0xcb, 0xd7, 0xc3, 0x20, 0x9a, 0x19, 0x7b, 0xe8, 0x29, 0x60, - 0x18, 0xc0, 0x7e, 0x78, 0x0a, 0xde, 0x50, 0xbc, 0xf7, 0xc0, 0xd2, 0xc8, 0xc8, 0x8c, 0x9f, 0xed, - 0x5c, 0x6b, 0x86, 0x22, 0x81, 0x69, 0x5a, 0xd2, 0xb5, 0xd5, 0x8c, 0xd4, 0xdd, 0x74, 0xb7, 0x84, - 0x3d, 0xcb, 0x54, 0xb2, 0xa1, 0xb2, 0x60, 0x99, 0x0d, 0xab, 0x64, 0x9b, 0x7d, 0x56, 0x59, 0xb1, - 0x60, 0x91, 0x05, 0xcb, 0x2c, 0x52, 0x24, 0x05, 0xbb, 0xfc, 0x03, 0xa9, 0x4a, 0x55, 0xaa, 0x52, - 0xf7, 0xa3, 0x3f, 0xa5, 0x96, 0x5a, 0x90, 0x5d, 0x76, 0x7d, 0x4f, 0x9f, 0x73, 0xfa, 0x7e, 0x9e, - 0xf3, 0xfb, 0x9d, 0xbe, 0xf0, 0xac, 0x43, 0xf4, 0x3e, 0xb1, 0x46, 0x9a, 0xee, 0xec, 0xa8, 0xdd, - 0x9e, 0xb6, 0xe3, 0x5c, 0x9a, 0xc4, 0xde, 0x36, 0x2d, 0xc3, 0x31, 0x50, 0xd5, 0x7f, 0xb9, 0x4d, - 0x5f, 0x4a, 0xd7, 0x03, 0xda, 0x3d, 
0xeb, 0xd2, 0x74, 0x8c, 0x1d, 0xd3, 0x32, 0x8c, 0x33, 0xae, - 0x2f, 0x5d, 0x0b, 0xbc, 0x66, 0x7e, 0x82, 0xde, 0x42, 0x6f, 0x85, 0xf1, 0x63, 0x72, 0xe9, 0xbe, - 0xbd, 0x3e, 0x65, 0x6b, 0xaa, 0x96, 0x3a, 0x72, 0x5f, 0x6f, 0x9d, 0x1b, 0xc6, 0xf9, 0x90, 0xec, - 0xb0, 0x56, 0x77, 0x7c, 0xb6, 0xe3, 0x68, 0x23, 0x62, 0x3b, 0xea, 0xc8, 0x14, 0x0a, 0x57, 0xce, - 0x8d, 0x73, 0x83, 0x3d, 0xee, 0xd0, 0x27, 0x2e, 0x95, 0x7f, 0x03, 0xb0, 0x8a, 0xc9, 0xa7, 0x63, - 0x62, 0x3b, 0x68, 0x17, 0x72, 0xa4, 0x37, 0x30, 0xea, 0xe9, 0x1b, 0xe9, 0x5b, 0x6b, 0xbb, 0xd7, - 0xb6, 0x23, 0x83, 0xdb, 0x16, 0x7a, 0xad, 0xde, 0xc0, 0x68, 0xa7, 0x30, 0xd3, 0x45, 0xaf, 0x43, - 0xfe, 0x6c, 0x38, 0xb6, 0x07, 0xf5, 0x0c, 0x33, 0xba, 0x1e, 0x67, 0xb4, 0x4f, 0x95, 0xda, 0x29, - 0xcc, 0xb5, 0xe9, 0xa7, 0x34, 0xfd, 0xcc, 0xa8, 0x67, 0xe7, 0x7f, 0xea, 0x40, 0x3f, 0x63, 0x9f, - 0xa2, 0xba, 0xa8, 0x01, 0xa0, 0xe9, 0x9a, 0xa3, 0xf4, 0x06, 0xaa, 0xa6, 0xd7, 0x73, 0xcc, 0xf2, - 0xb9, 0x78, 0x4b, 0xcd, 0x69, 0x52, 0xc5, 0x76, 0x0a, 0x17, 0x35, 0xb7, 0x41, 0xbb, 0xfb, 0xe9, - 0x98, 0x58, 0x97, 0xf5, 0xfc, 0xfc, 0xee, 0xfe, 0x84, 0x2a, 0xd1, 0xee, 0x32, 0x6d, 0xd4, 0x82, - 0xb5, 0x2e, 0x39, 0xd7, 0x74, 0xa5, 0x3b, 0x34, 0x7a, 0x8f, 0xeb, 0x2b, 0xcc, 0x58, 0x8e, 0x33, - 0x6e, 0x50, 0xd5, 0x06, 0xd5, 0x6c, 0xa7, 0x30, 0x74, 0xbd, 0x16, 0xfa, 0x1f, 0x28, 0xf4, 0x06, - 0xa4, 0xf7, 0x58, 0x71, 0x2e, 0xea, 0xab, 0xcc, 0xc7, 0x56, 0x9c, 0x8f, 0x26, 0xd5, 0xeb, 0x5c, - 0xb4, 0x53, 0x78, 0xb5, 0xc7, 0x1f, 0xe9, 0xf8, 0xfb, 0x64, 0xa8, 0x4d, 0x88, 0x45, 0xed, 0x0b, - 0xf3, 0xc7, 0x7f, 0x8f, 0x6b, 0x32, 0x0f, 0xc5, 0xbe, 0xdb, 0x40, 0x6f, 0x43, 0x91, 0xe8, 0x7d, - 0x31, 0x8c, 0x22, 0x73, 0x71, 0x23, 0x76, 0x9d, 0xf5, 0xbe, 0x3b, 0x88, 0x02, 0x11, 0xcf, 0xe8, - 0x2e, 0xac, 0xf4, 0x8c, 0xd1, 0x48, 0x73, 0xea, 0xc0, 0xac, 0x37, 0x63, 0x07, 0xc0, 0xb4, 0xda, - 0x29, 0x2c, 0xf4, 0xd1, 0x11, 0x54, 0x86, 0x9a, 0xed, 0x28, 0xb6, 0xae, 0x9a, 0xf6, 0xc0, 0x70, - 0xec, 0xfa, 0x1a, 0xf3, 0xf0, 0x7c, 0x9c, 0x87, 0x43, 0xcd, 0x76, 0x4e, 0x5d, 0xe5, 0x76, 0x0a, - 0x97, 0x87, 0x41, 0x01, 0xf5, 0x67, 0x9c, 0x9d, 0x11, 0xcb, 0x73, 0x58, 0x2f, 0xcd, 0xf7, 0x77, - 0x4c, 0xb5, 0x5d, 0x7b, 0xea, 0xcf, 0x08, 0x0a, 0xd0, 0xcf, 0x60, 0x63, 0x68, 0xa8, 0x7d, 0xcf, - 0x9d, 0xd2, 0x1b, 0x8c, 0xf5, 0xc7, 0xf5, 0x32, 0x73, 0xfa, 0x52, 0x6c, 0x27, 0x0d, 0xb5, 0xef, - 0xba, 0x68, 0x52, 0x83, 0x76, 0x0a, 0xaf, 0x0f, 0xa3, 0x42, 0xf4, 0x08, 0xae, 0xa8, 0xa6, 0x39, - 0xbc, 0x8c, 0x7a, 0xaf, 0x30, 0xef, 0xb7, 0xe3, 0xbc, 0xef, 0x51, 0x9b, 0xa8, 0x7b, 0xa4, 0x4e, - 0x49, 0x51, 0x07, 0x6a, 0xa6, 0x45, 0x4c, 0xd5, 0x22, 0x8a, 0x69, 0x19, 0xa6, 0x61, 0xab, 0xc3, - 0x7a, 0x95, 0xf9, 0x7e, 0x31, 0xce, 0xf7, 0x09, 0xd7, 0x3f, 0x11, 0xea, 0xed, 0x14, 0xae, 0x9a, - 0x61, 0x11, 0xf7, 0x6a, 0xf4, 0x88, 0x6d, 0xfb, 0x5e, 0x6b, 0x8b, 0xbc, 0x32, 0xfd, 0xb0, 0xd7, - 0x90, 0x88, 0x1e, 0x26, 0x72, 0x41, 0xcd, 0x95, 0x89, 0xe1, 0x90, 0xfa, 0xfa, 0xfc, 0xc3, 0xd4, - 0x62, 0xaa, 0x0f, 0x0d, 0x87, 0xd0, 0xc3, 0x44, 0xbc, 0x16, 0x52, 0xe1, 0xa9, 0x09, 0xb1, 0xb4, - 0xb3, 0x4b, 0xe6, 0x46, 0x61, 0x6f, 0x6c, 0xcd, 0xd0, 0xeb, 0x88, 0x39, 0x7c, 0x39, 0xce, 0xe1, - 0x43, 0x66, 0x44, 0x5d, 0xb4, 0x5c, 0x93, 0x76, 0x0a, 0x6f, 0x4c, 0xa6, 0xc5, 0x8d, 0x55, 0xc8, - 0x4f, 0xd4, 0xe1, 0x98, 0xc8, 0x2f, 0xc2, 0x5a, 0x20, 0xf8, 0xa1, 0x3a, 0xac, 0x8e, 0x88, 0x6d, - 0xab, 0xe7, 0x84, 0xc5, 0xca, 0x22, 0x76, 0x9b, 0x72, 0x05, 0x4a, 0xc1, 0x80, 0x27, 0x7f, 0x91, - 0xf6, 0x2c, 0x69, 0x2c, 0xa3, 0x96, 0x13, 0x62, 0xb1, 0x6e, 0x0a, 0x4b, 0xd1, 0x44, 0x37, 0xa1, - 0xcc, 0x4e, 0xa5, 0xe2, 0xbe, 0xa7, 0x01, 0x35, 0x87, 0x4b, 
0x4c, 0xf8, 0x50, 0x28, 0x6d, 0xc1, - 0x9a, 0xb9, 0x6b, 0x7a, 0x2a, 0x59, 0xa6, 0x02, 0xe6, 0xae, 0xe9, 0x2a, 0x3c, 0x07, 0x25, 0x3a, - 0x56, 0x4f, 0x23, 0xc7, 0x3e, 0xb2, 0x46, 0x65, 0x42, 0x45, 0xfe, 0x63, 0x06, 0x6a, 0xd1, 0x20, - 0x89, 0xee, 0x42, 0x8e, 0xe6, 0x0b, 0x11, 0xfa, 0xa5, 0x6d, 0x9e, 0x4c, 0xb6, 0xdd, 0x64, 0xb2, - 0xdd, 0x71, 0x93, 0x49, 0xa3, 0xf0, 0xf5, 0xb7, 0x5b, 0xa9, 0x2f, 0xfe, 0xb2, 0x95, 0xc6, 0xcc, - 0x02, 0x3d, 0x43, 0x63, 0x9a, 0xaa, 0xe9, 0x8a, 0xd6, 0x67, 0x5d, 0x2e, 0xd2, 0x80, 0xa5, 0x6a, - 0xfa, 0x41, 0x1f, 0x1d, 0x42, 0xad, 0x67, 0xe8, 0x36, 0xd1, 0xed, 0xb1, 0xad, 0xf0, 0x64, 0x25, - 0x02, 0x7e, 0x28, 0x6c, 0xf1, 0x14, 0xd8, 0x74, 0x35, 0x4f, 0x98, 0x22, 0xae, 0xf6, 0xc2, 0x02, - 0xb4, 0x0f, 0x30, 0x51, 0x87, 0x5a, 0x5f, 0x75, 0x0c, 0xcb, 0xae, 0xe7, 0x6e, 0x64, 0x67, 0xc6, - 0xae, 0x87, 0xae, 0xca, 0x03, 0xb3, 0xaf, 0x3a, 0xa4, 0x91, 0xa3, 0xdd, 0xc5, 0x01, 0x4b, 0xf4, - 0x02, 0x54, 0x55, 0xd3, 0x54, 0x6c, 0x47, 0x75, 0x88, 0xd2, 0xbd, 0x74, 0x88, 0xcd, 0x92, 0x41, - 0x09, 0x97, 0x55, 0xd3, 0x3c, 0xa5, 0xd2, 0x06, 0x15, 0xa2, 0xe7, 0xa1, 0x42, 0xf3, 0x86, 0xa6, - 0x0e, 0x95, 0x01, 0xd1, 0xce, 0x07, 0x0e, 0x0b, 0xfb, 0x59, 0x5c, 0x16, 0xd2, 0x36, 0x13, 0xca, - 0x7d, 0x6f, 0xc5, 0x59, 0xce, 0x40, 0x08, 0x72, 0x7d, 0xd5, 0x51, 0xd9, 0x4c, 0x96, 0x30, 0x7b, - 0xa6, 0x32, 0x53, 0x75, 0x06, 0x62, 0x7e, 0xd8, 0x33, 0xba, 0x0a, 0x2b, 0xc2, 0x6d, 0x96, 0xb9, - 0x15, 0x2d, 0x74, 0x05, 0xf2, 0xa6, 0x65, 0x4c, 0x08, 0x5b, 0xba, 0x02, 0xe6, 0x0d, 0xf9, 0x17, - 0x19, 0x58, 0x9f, 0xca, 0x2e, 0xd4, 0xef, 0x40, 0xb5, 0x07, 0xee, 0xb7, 0xe8, 0x33, 0x7a, 0x83, - 0xfa, 0x55, 0xfb, 0xc4, 0x12, 0x19, 0xb9, 0x3e, 0x3d, 0xd5, 0x6d, 0xf6, 0x5e, 0x4c, 0x8d, 0xd0, - 0x46, 0xc7, 0x50, 0x1b, 0xaa, 0xb6, 0xa3, 0xf0, 0x68, 0xad, 0x04, 0xb2, 0xf3, 0x74, 0x8e, 0x3a, - 0x54, 0xdd, 0xf8, 0x4e, 0x37, 0xb5, 0x70, 0x54, 0x19, 0x86, 0xa4, 0x08, 0xc3, 0x95, 0xee, 0xe5, - 0x13, 0x55, 0x77, 0x34, 0x9d, 0x28, 0x53, 0x2b, 0xf7, 0xcc, 0x94, 0xd3, 0xd6, 0x44, 0xeb, 0x13, - 0xbd, 0xe7, 0x2e, 0xd9, 0x86, 0x67, 0xec, 0x2d, 0xa9, 0x2d, 0x63, 0xa8, 0x84, 0xf3, 0x23, 0xaa, - 0x40, 0xc6, 0xb9, 0x10, 0x13, 0x90, 0x71, 0x2e, 0xd0, 0x7f, 0x41, 0x8e, 0x0e, 0x92, 0x0d, 0xbe, - 0x32, 0x03, 0x58, 0x08, 0xbb, 0xce, 0xa5, 0x49, 0x30, 0xd3, 0x94, 0x65, 0xef, 0x38, 0x78, 0x39, - 0x33, 0xea, 0x55, 0x7e, 0x09, 0xaa, 0x91, 0xa4, 0x18, 0x58, 0xbf, 0x74, 0x70, 0xfd, 0xe4, 0x2a, - 0x94, 0x43, 0x19, 0x50, 0xbe, 0x0a, 0x57, 0x66, 0x25, 0x34, 0x79, 0xe0, 0xc9, 0x43, 0x89, 0x09, - 0xbd, 0x0e, 0x05, 0x2f, 0xa3, 0xf1, 0xe3, 0x38, 0x3d, 0x57, 0xae, 0x32, 0xf6, 0x54, 0xe9, 0x39, - 0xa4, 0xdb, 0x9a, 0xed, 0x87, 0x0c, 0xeb, 0xf8, 0xaa, 0x6a, 0x9a, 0x6d, 0xd5, 0x1e, 0xc8, 0x1f, - 0x43, 0x3d, 0x2e, 0x5b, 0x45, 0x86, 0x91, 0xf3, 0xb6, 0xe1, 0x55, 0x58, 0x39, 0x33, 0xac, 0x91, - 0xea, 0x30, 0x67, 0x65, 0x2c, 0x5a, 0x74, 0x7b, 0xf2, 0xcc, 0x95, 0x65, 0x62, 0xde, 0x90, 0x15, - 0x78, 0x26, 0x36, 0x63, 0x51, 0x13, 0x4d, 0xef, 0x13, 0x3e, 0x9f, 0x65, 0xcc, 0x1b, 0xbe, 0x23, - 0xde, 0x59, 0xde, 0xa0, 0x9f, 0xb5, 0xd9, 0x58, 0x99, 0xff, 0x22, 0x16, 0x2d, 0x59, 0x81, 0xab, - 0xb3, 0xd3, 0x16, 0xba, 0x0e, 0xc0, 0xe3, 0xa6, 0x38, 0x75, 0xd9, 0x5b, 0x25, 0x5c, 0x64, 0x92, - 0x7b, 0xf4, 0xe8, 0xbd, 0x00, 0x55, 0xff, 0xb5, 0x62, 0x6b, 0x4f, 0xf8, 0xd6, 0xc8, 0xe2, 0xb2, - 0xa7, 0x73, 0xaa, 0x3d, 0x21, 0xf2, 0xdb, 0xde, 0xf9, 0xf2, 0x13, 0x0e, 0xba, 0x0d, 0x39, 0x96, - 0xa2, 0xf8, 0x32, 0x5c, 0x9d, 0x3e, 0x49, 0x54, 0x0b, 0x33, 0x1d, 0xb9, 0x0d, 0x52, 0x7c, 0x82, - 0x59, 0xca, 0x53, 0x37, 0x30, 0xd6, 0x70, 0xe6, 0xf4, 0xcf, 0x76, 0x7a, 0xa9, 0xb3, 
0x5d, 0x83, - 0xac, 0x73, 0x61, 0xd7, 0x33, 0x6c, 0x72, 0xe8, 0xa3, 0xfc, 0x0f, 0x80, 0x02, 0x26, 0xb6, 0x49, - 0x63, 0x2c, 0x6a, 0x40, 0x91, 0x5c, 0xf4, 0x88, 0xe9, 0xb8, 0x69, 0x69, 0x76, 0x3a, 0xe6, 0xda, - 0x2d, 0x57, 0x93, 0x02, 0x4b, 0xcf, 0x0c, 0xdd, 0x11, 0xdc, 0x21, 0x9e, 0x06, 0x08, 0xf3, 0x20, - 0x79, 0x78, 0xc3, 0x25, 0x0f, 0xd9, 0x58, 0x2c, 0xc9, 0xad, 0x22, 0xec, 0xe1, 0x8e, 0x60, 0x0f, - 0xb9, 0x05, 0x1f, 0x0b, 0xd1, 0x87, 0x66, 0x88, 0x3e, 0xe4, 0x17, 0x0c, 0x33, 0x86, 0x3f, 0xbc, - 0xe1, 0xf2, 0x87, 0x95, 0x05, 0x3d, 0x8e, 0x10, 0x88, 0xfd, 0x30, 0x81, 0xe0, 0xe0, 0xff, 0x66, - 0xac, 0x75, 0x2c, 0x83, 0xf8, 0xdf, 0x00, 0x83, 0x28, 0xc4, 0xc2, 0x77, 0xee, 0x64, 0x06, 0x85, - 0x68, 0x86, 0x28, 0x44, 0x71, 0xc1, 0x1c, 0xc4, 0x70, 0x88, 0x77, 0x82, 0x1c, 0x02, 0x62, 0x69, - 0x88, 0x58, 0xef, 0x59, 0x24, 0xe2, 0x4d, 0x8f, 0x44, 0xac, 0xc5, 0xb2, 0x20, 0x31, 0x86, 0x28, - 0x8b, 0x38, 0x9e, 0x62, 0x11, 0x1c, 0xf5, 0xbf, 0x10, 0xeb, 0x62, 0x01, 0x8d, 0x38, 0x9e, 0xa2, - 0x11, 0xe5, 0x05, 0x0e, 0x17, 0xf0, 0x88, 0x0f, 0x67, 0xf3, 0x88, 0x78, 0xa4, 0x2f, 0xba, 0x99, - 0x8c, 0x48, 0x28, 0x31, 0x44, 0xa2, 0x1a, 0x0b, 0x7a, 0xb9, 0xfb, 0xc4, 0x4c, 0xe2, 0xc1, 0x0c, - 0x26, 0xc1, 0x31, 0xff, 0xad, 0x58, 0xe7, 0x09, 0xa8, 0xc4, 0x83, 0x19, 0x54, 0x62, 0x7d, 0xa1, - 0xdb, 0x85, 0x5c, 0x62, 0x3f, 0xcc, 0x25, 0xd0, 0x82, 0x73, 0x15, 0x4b, 0x26, 0xba, 0x71, 0x64, - 0x62, 0x83, 0x79, 0x7c, 0x25, 0xd6, 0xe3, 0x0f, 0x61, 0x13, 0x2f, 0xd1, 0x5c, 0x13, 0x89, 0xa6, - 0x34, 0x1f, 0x12, 0xcb, 0x32, 0x2c, 0xc1, 0x0b, 0x78, 0x43, 0xbe, 0x45, 0xd1, 0xa5, 0x1f, 0x39, - 0xe7, 0x30, 0x0f, 0x86, 0x3b, 0x02, 0xd1, 0x52, 0xfe, 0x7d, 0xda, 0xb7, 0x65, 0x80, 0x2c, 0x88, - 0x4c, 0x8b, 0x02, 0x99, 0x06, 0xf8, 0x48, 0x26, 0xcc, 0x47, 0xb6, 0x60, 0x8d, 0xe2, 0x89, 0x08, - 0xd5, 0x50, 0x4d, 0x8f, 0x6a, 0xdc, 0x86, 0x75, 0x06, 0x18, 0x79, 0x7a, 0x15, 0x20, 0x22, 0xc7, - 0x72, 0x6b, 0x95, 0xbe, 0xe0, 0xc7, 0x9e, 0xa3, 0x89, 0x57, 0x61, 0x23, 0xa0, 0xeb, 0xe1, 0x14, - 0x8e, 0xbb, 0x6b, 0x9e, 0xf6, 0x9e, 0x00, 0x2c, 0x5f, 0xa5, 0xfd, 0x19, 0xf2, 0x39, 0xca, 0x2c, - 0x3a, 0x91, 0xfe, 0x37, 0xd1, 0x89, 0xcc, 0x0f, 0xa6, 0x13, 0x41, 0xdc, 0x95, 0x0d, 0xe3, 0xae, - 0xbf, 0xa7, 0xfd, 0x35, 0xf1, 0xc8, 0x41, 0xcf, 0xe8, 0x13, 0x81, 0x84, 0xd8, 0x33, 0x4d, 0xce, - 0x43, 0xe3, 0x5c, 0xe0, 0x1d, 0xfa, 0x48, 0xb5, 0xbc, 0xf4, 0x56, 0x14, 0xd9, 0xcb, 0x03, 0x51, - 0x79, 0x36, 0xc3, 0x02, 0x44, 0xd5, 0x20, 0xfb, 0x98, 0xf0, 0x64, 0x54, 0xc2, 0xf4, 0x91, 0xea, - 0xb1, 0x4d, 0xc6, 0x52, 0x4c, 0x09, 0xf3, 0x06, 0xba, 0x0b, 0x45, 0x56, 0x86, 0x54, 0x0c, 0xd3, - 0x16, 0x79, 0xe3, 0xd9, 0xe0, 0x58, 0x79, 0xb5, 0x71, 0xfb, 0x84, 0xea, 0x1c, 0x9b, 0x36, 0x2e, - 0x98, 0xe2, 0x29, 0x80, 0x0f, 0x8b, 0x21, 0x9a, 0x72, 0x0d, 0x8a, 0xb4, 0xf7, 0xb6, 0xa9, 0xf6, - 0x08, 0x4b, 0x02, 0x45, 0xec, 0x0b, 0xe4, 0x47, 0x80, 0xa6, 0x53, 0x19, 0x6a, 0xc3, 0x0a, 0x99, - 0x10, 0xdd, 0xb1, 0x19, 0x4c, 0x8b, 0xc0, 0x20, 0xc1, 0x01, 0x88, 0xee, 0x34, 0xea, 0x74, 0x92, - 0xff, 0xf6, 0xed, 0x56, 0x8d, 0x6b, 0xbf, 0x62, 0x8c, 0x34, 0x87, 0x8c, 0x4c, 0xe7, 0x12, 0x0b, - 0x7b, 0xf9, 0xcf, 0x19, 0x0a, 0xc8, 0x43, 0x69, 0x6e, 0xe6, 0xdc, 0xba, 0x5b, 0x3e, 0x13, 0x20, - 0x63, 0xc9, 0xe6, 0x7b, 0x13, 0xe0, 0x5c, 0xb5, 0x95, 0xcf, 0x54, 0xdd, 0x21, 0x7d, 0x31, 0xe9, - 0x01, 0x09, 0x92, 0xa0, 0x40, 0x5b, 0x63, 0x9b, 0xf4, 0x05, 0x2f, 0xf4, 0xda, 0x81, 0x71, 0xae, - 0xfe, 0xb8, 0x71, 0x86, 0x67, 0xb9, 0x10, 0x99, 0xe5, 0x00, 0x58, 0x2e, 0x06, 0xc1, 0x32, 0xed, - 0x9b, 0x69, 0x69, 0x86, 0xa5, 0x39, 0x97, 0x6c, 0x69, 0xb2, 0xd8, 0x6b, 0xa3, 0x9b, 0x50, 0x1e, - 0x91, 0x91, 
0x69, 0x18, 0x43, 0x85, 0x87, 0x9b, 0x35, 0x66, 0x5a, 0x12, 0xc2, 0x16, 0x8b, 0x3a, - 0xbf, 0xcc, 0xf8, 0xe7, 0xcf, 0x27, 0x45, 0xff, 0x71, 0x13, 0x2c, 0xff, 0x8a, 0x95, 0x4a, 0xc2, - 0x40, 0x06, 0x9d, 0xc2, 0xba, 0x77, 0xfc, 0x95, 0x31, 0x0b, 0x0b, 0xee, 0x86, 0x4e, 0x1a, 0x3f, - 0x6a, 0x93, 0xb0, 0xd8, 0x46, 0x1f, 0xc0, 0xd3, 0x91, 0xd8, 0xe6, 0xb9, 0xce, 0x24, 0x0d, 0x71, - 0x4f, 0x85, 0x43, 0x9c, 0xeb, 0xda, 0x9f, 0xac, 0xec, 0x8f, 0x3c, 0x75, 0x07, 0x94, 0x7d, 0x07, - 0x71, 0xd9, 0xcc, 0xe5, 0xbf, 0x09, 0x65, 0x8b, 0x38, 0xaa, 0xa6, 0x2b, 0xa1, 0xfa, 0x46, 0x89, - 0x0b, 0x45, 0xd5, 0xe4, 0x04, 0x9e, 0x9a, 0x89, 0xcf, 0xd0, 0x7f, 0x43, 0xd1, 0x87, 0x76, 0xe9, - 0x98, 0x52, 0x81, 0x47, 0x7f, 0x7d, 0x5d, 0xf9, 0x0f, 0x69, 0xdf, 0x65, 0x98, 0x50, 0xb7, 0x60, - 0xc5, 0x22, 0xf6, 0x78, 0xc8, 0x29, 0x6e, 0x65, 0xf7, 0xd5, 0x64, 0xc8, 0x8e, 0x4a, 0xc7, 0x43, - 0x07, 0x0b, 0x63, 0xf9, 0x11, 0xac, 0x70, 0x09, 0x5a, 0x83, 0xd5, 0x07, 0x47, 0xf7, 0x8f, 0x8e, - 0xdf, 0x3f, 0xaa, 0xa5, 0x10, 0xc0, 0xca, 0x5e, 0xb3, 0xd9, 0x3a, 0xe9, 0xd4, 0xd2, 0xa8, 0x08, - 0xf9, 0xbd, 0xc6, 0x31, 0xee, 0xd4, 0x32, 0x54, 0x8c, 0x5b, 0xef, 0xb5, 0x9a, 0x9d, 0x5a, 0x16, - 0xad, 0x43, 0x99, 0x3f, 0x2b, 0xfb, 0xc7, 0xf8, 0xff, 0xf7, 0x3a, 0xb5, 0x5c, 0x40, 0x74, 0xda, - 0x3a, 0xba, 0xd7, 0xc2, 0xb5, 0xbc, 0xfc, 0x1a, 0xe5, 0xd0, 0x31, 0x58, 0xd0, 0x67, 0xcb, 0xe9, - 0x00, 0x5b, 0x96, 0x7f, 0x9d, 0xa1, 0xa4, 0x33, 0x0e, 0xe0, 0xa1, 0xf7, 0x22, 0x03, 0xdf, 0x5d, - 0x02, 0x1d, 0x46, 0x46, 0x8f, 0x9e, 0x87, 0x8a, 0x45, 0xce, 0x88, 0xd3, 0x1b, 0x70, 0xc0, 0xc9, - 0x53, 0x66, 0x19, 0x97, 0x85, 0x94, 0x19, 0xd9, 0x5c, 0xed, 0x13, 0xd2, 0x73, 0x14, 0x1e, 0x8b, - 0xf8, 0xa6, 0x2b, 0x52, 0x35, 0x2a, 0x3d, 0xe5, 0x42, 0xf9, 0xe3, 0xa5, 0xe6, 0xb2, 0x08, 0x79, - 0xdc, 0xea, 0xe0, 0x0f, 0x6a, 0x59, 0x84, 0xa0, 0xc2, 0x1e, 0x95, 0xd3, 0xa3, 0xbd, 0x93, 0xd3, - 0xf6, 0x31, 0x9d, 0xcb, 0x0d, 0xa8, 0xba, 0x73, 0xe9, 0x0a, 0xf3, 0xf2, 0x5d, 0x78, 0x3a, 0x06, - 0x9d, 0x2e, 0xa8, 0x18, 0xc8, 0x1f, 0xfa, 0xb9, 0x2b, 0x50, 0x0a, 0xd8, 0x87, 0x4a, 0x04, 0x19, - 0xa6, 0xa7, 0xa9, 0x8b, 0x4f, 0xe5, 0x3d, 0xd4, 0x87, 0xcb, 0x93, 0x60, 0x53, 0xfe, 0x6d, 0x1a, - 0x9e, 0x9d, 0x83, 0x1d, 0xd1, 0xfd, 0xc8, 0x9a, 0xdd, 0x59, 0x06, 0x79, 0x46, 0xb7, 0xec, 0xdd, - 0x44, 0xd3, 0x7c, 0x7a, 0xb8, 0x77, 0xda, 0x0e, 0x6f, 0x59, 0xf9, 0x77, 0xe9, 0xe0, 0xfc, 0x85, - 0x31, 0xf7, 0xbb, 0x91, 0x2e, 0xee, 0x24, 0x05, 0xf0, 0xd1, 0x3d, 0x25, 0x41, 0x81, 0x88, 0xa2, - 0x9f, 0xa8, 0x4d, 0x78, 0x6d, 0xf9, 0xd5, 0xc5, 0x5d, 0xf7, 0xfb, 0x9b, 0x91, 0x3f, 0x82, 0x4a, - 0xb8, 0x28, 0x49, 0x4f, 0x8c, 0x65, 0x8c, 0xf5, 0x3e, 0xeb, 0x64, 0x1e, 0xf3, 0x06, 0x7a, 0x1d, - 0xf2, 0x74, 0x3d, 0x5c, 0xc0, 0x37, 0x1d, 0x5a, 0xe8, 0x7c, 0x06, 0x8a, 0x9a, 0x5c, 0x5b, 0x7e, - 0x02, 0x79, 0x16, 0x24, 0x69, 0xc0, 0x63, 0xe5, 0x45, 0x81, 0xa1, 0xe9, 0x33, 0xfa, 0x08, 0x40, - 0x75, 0x1c, 0x4b, 0xeb, 0x8e, 0x7d, 0xc7, 0x5b, 0xb3, 0x83, 0xec, 0x9e, 0xab, 0xd7, 0xb8, 0x26, - 0xa2, 0xed, 0x15, 0xdf, 0x34, 0x10, 0x71, 0x03, 0x0e, 0xe5, 0x23, 0xa8, 0x84, 0x6d, 0x5d, 0xd4, - 0xc7, 0xfb, 0x10, 0x46, 0x7d, 0x1c, 0xc4, 0x0b, 0xd4, 0xe7, 0x61, 0xc6, 0x2c, 0x2f, 0x25, 0xb3, - 0x86, 0xfc, 0x79, 0x1a, 0x0a, 0x9d, 0x0b, 0x31, 0xb9, 0x31, 0x55, 0x4c, 0xdf, 0x34, 0x13, 0xac, - 0xd9, 0xf1, 0xb2, 0x68, 0xd6, 0x2b, 0xb6, 0xbe, 0xe3, 0xed, 0x84, 0x5c, 0xd2, 0x52, 0x82, 0x5b, - 0x99, 0x12, 0x3b, 0xf4, 0x2d, 0x28, 0x7a, 0x29, 0x92, 0x92, 0x11, 0xb5, 0xdf, 0xb7, 0x88, 0x6d, - 0x8b, 0x30, 0xe7, 0x36, 0x59, 0x51, 0xdc, 0xf8, 0x4c, 0x54, 0x05, 0xb3, 0x98, 0x37, 0xe4, 0x3e, - 0x54, 0x23, 0xf9, 0x15, 0xbd, 0x05, 
0xab, 0xe6, 0xb8, 0xab, 0xb8, 0xd3, 0x13, 0xf9, 0xb5, 0xec, - 0xc2, 0xdc, 0x71, 0x77, 0xa8, 0xf5, 0xee, 0x93, 0x4b, 0xb7, 0x33, 0xe6, 0xb8, 0x7b, 0x9f, 0xcf, - 0x22, 0xff, 0x4a, 0x26, 0xf8, 0x95, 0x09, 0x14, 0xdc, 0x4d, 0x81, 0xfe, 0x0f, 0x8a, 0x5e, 0xea, - 0xf6, 0xfe, 0x95, 0xc4, 0xe6, 0x7c, 0xe1, 0xde, 0x37, 0xa1, 0x9c, 0xc9, 0xd6, 0xce, 0x75, 0xd2, - 0x57, 0x7c, 0x3a, 0xc4, 0xbe, 0x56, 0xc0, 0x55, 0xfe, 0xe2, 0xd0, 0xe5, 0x42, 0xf2, 0x3f, 0xd3, - 0x50, 0x70, 0x6b, 0xe2, 0xe8, 0xb5, 0xc0, 0xbe, 0xab, 0xcc, 0xa8, 0x78, 0xb9, 0x8a, 0x7e, 0x5d, - 0x3b, 0xdc, 0xd7, 0xcc, 0xf2, 0x7d, 0x8d, 0xfb, 0x41, 0xe1, 0xfe, 0x2a, 0xca, 0x2d, 0xfd, 0xab, - 0xe8, 0x15, 0x40, 0x8e, 0xe1, 0xa8, 0x43, 0xca, 0xb1, 0x35, 0xfd, 0x5c, 0xe1, 0x93, 0xcd, 0xa1, - 0x5f, 0x8d, 0xbd, 0x79, 0xc8, 0x5e, 0x9c, 0xb0, 0x79, 0xff, 0x79, 0x1a, 0x0a, 0x5e, 0x0e, 0x5f, - 0xb6, 0x4c, 0x7d, 0x15, 0x56, 0x44, 0x9a, 0xe2, 0x75, 0x6a, 0xd1, 0xf2, 0xfe, 0x98, 0xe4, 0x02, - 0x7f, 0x4c, 0x24, 0x28, 0x8c, 0x88, 0xa3, 0xb2, 0x6c, 0xc0, 0x19, 0xa9, 0xd7, 0xbe, 0xfd, 0x26, - 0xac, 0x05, 0xfe, 0x18, 0xd0, 0x93, 0x77, 0xd4, 0x7a, 0xbf, 0x96, 0x92, 0x56, 0x3f, 0xff, 0xf2, - 0x46, 0xf6, 0x88, 0x7c, 0x46, 0xf7, 0x2c, 0x6e, 0x35, 0xdb, 0xad, 0xe6, 0xfd, 0x5a, 0x5a, 0x5a, - 0xfb, 0xfc, 0xcb, 0x1b, 0xab, 0x98, 0xb0, 0x6a, 0xdb, 0xed, 0x36, 0x94, 0x82, 0xab, 0x12, 0x8e, - 0x63, 0x08, 0x2a, 0xf7, 0x1e, 0x9c, 0x1c, 0x1e, 0x34, 0xf7, 0x3a, 0x2d, 0xe5, 0xe1, 0x71, 0xa7, - 0x55, 0x4b, 0xa3, 0xa7, 0x61, 0xe3, 0xf0, 0xe0, 0xdd, 0x76, 0x47, 0x69, 0x1e, 0x1e, 0xb4, 0x8e, - 0x3a, 0xca, 0x5e, 0xa7, 0xb3, 0xd7, 0xbc, 0x5f, 0xcb, 0xec, 0x7e, 0x55, 0x82, 0xea, 0x5e, 0xa3, - 0x79, 0x40, 0xb3, 0xb4, 0xd6, 0x53, 0x59, 0xb9, 0xa0, 0x09, 0x39, 0x56, 0x10, 0x98, 0x7b, 0x4b, - 0x43, 0x9a, 0x5f, 0x87, 0x45, 0xfb, 0x90, 0x67, 0xb5, 0x02, 0x34, 0xff, 0xda, 0x86, 0xb4, 0xa0, - 0x30, 0x4b, 0x3b, 0xc3, 0x8e, 0xc7, 0xdc, 0x7b, 0x1c, 0xd2, 0xfc, 0x3a, 0x2d, 0xc2, 0x50, 0xf4, - 0xb9, 0xc6, 0xe2, 0x7b, 0x0d, 0x52, 0x82, 0x60, 0x83, 0x0e, 0x61, 0xd5, 0xa5, 0x87, 0x8b, 0x6e, - 0x5a, 0x48, 0x0b, 0x0b, 0xa9, 0x74, 0xba, 0x38, 0x8d, 0x9f, 0x7f, 0x6d, 0x44, 0x5a, 0x50, 0x15, - 0x46, 0x07, 0xb0, 0x22, 0xf0, 0xf3, 0x82, 0xdb, 0x13, 0xd2, 0xa2, 0xc2, 0x28, 0x9d, 0x34, 0xbf, - 0x40, 0xb2, 0xf8, 0x32, 0x8c, 0x94, 0xa0, 0xe0, 0x8d, 0x1e, 0x00, 0x04, 0x48, 0x7b, 0x82, 0x5b, - 0x2e, 0x52, 0x92, 0x42, 0x36, 0x3a, 0x86, 0x82, 0xc7, 0xa1, 0x16, 0xde, 0x39, 0x91, 0x16, 0x57, - 0x94, 0xd1, 0x23, 0x28, 0x87, 0xb9, 0x43, 0xb2, 0x9b, 0x24, 0x52, 0xc2, 0x52, 0x31, 0xf5, 0x1f, - 0x26, 0x12, 0xc9, 0x6e, 0x96, 0x48, 0x09, 0x2b, 0xc7, 0xe8, 0x13, 0x58, 0x9f, 0x06, 0xfa, 0xc9, - 0x2f, 0x9a, 0x48, 0x4b, 0xd4, 0x92, 0xd1, 0x08, 0xd0, 0x0c, 0x82, 0xb0, 0xc4, 0xbd, 0x13, 0x69, - 0x99, 0xd2, 0x32, 0xea, 0x43, 0x35, 0x8a, 0xba, 0x93, 0xde, 0x43, 0x91, 0x12, 0x97, 0x99, 0xf9, - 0x57, 0xc2, 0xd8, 0x34, 0xe9, 0xbd, 0x14, 0x29, 0x71, 0xd5, 0x99, 0x1e, 0x87, 0x00, 0x0f, 0x48, - 0x70, 0x4f, 0x45, 0x4a, 0x52, 0x7f, 0x46, 0x26, 0x6c, 0xcc, 0xc2, 0xff, 0xcb, 0x5c, 0x5b, 0x91, - 0x96, 0x2a, 0x4b, 0x37, 0x5a, 0x5f, 0x7f, 0xb7, 0x99, 0xfe, 0xe6, 0xbb, 0xcd, 0xf4, 0x5f, 0xbf, - 0xdb, 0x4c, 0x7f, 0xf1, 0xfd, 0x66, 0xea, 0x9b, 0xef, 0x37, 0x53, 0x7f, 0xfa, 0x7e, 0x33, 0xf5, - 0xd3, 0x97, 0xcf, 0x35, 0x67, 0x30, 0xee, 0x6e, 0xf7, 0x8c, 0xd1, 0x4e, 0xf0, 0x96, 0xe1, 0xac, - 0x9b, 0x8f, 0xdd, 0x15, 0x96, 0xe9, 0xef, 0xfc, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xe2, 0x4c, - 0x31, 0x19, 0x29, 0x00, 0x00, + // 2958 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x9a, 0xcb, 
0x73, 0xe3, 0xc6, + 0xd1, 0xc0, 0x09, 0xbe, 0x44, 0x36, 0x9f, 0x1a, 0xc9, 0x6b, 0x1a, 0xde, 0x95, 0x64, 0xb8, 0x6c, + 0xaf, 0xd7, 0x6b, 0xe9, 0xb3, 0xf6, 0xb3, 0xb3, 0x2e, 0xe7, 0x25, 0x71, 0x29, 0x53, 0x5e, 0x45, + 0x52, 0x46, 0xdc, 0x75, 0x39, 0xb1, 0x0d, 0x83, 0xe4, 0x48, 0x84, 0x97, 0x04, 0x60, 0x00, 0xa4, + 0xa5, 0x3d, 0xa6, 0x2a, 0x17, 0x57, 0xa5, 0xca, 0xc7, 0x5c, 0x7c, 0x4b, 0x55, 0x72, 0xc9, 0x21, + 0xb7, 0x9c, 0x72, 0x4a, 0x55, 0x7c, 0xc8, 0xc1, 0xc7, 0x1c, 0x52, 0x4e, 0xca, 0x7b, 0xcb, 0x3f, + 0x90, 0x4b, 0x52, 0x95, 0x9a, 0x07, 0x5e, 0x24, 0xc1, 0x47, 0x92, 0x5b, 0x6e, 0x98, 0x46, 0x77, + 0x63, 0x30, 0x98, 0xe9, 0xf9, 0x75, 0x63, 0xe0, 0x59, 0x97, 0x18, 0x5d, 0x62, 0x0f, 0x74, 0xc3, + 0xdd, 0xd1, 0xda, 0x1d, 0x7d, 0xc7, 0xbd, 0xb2, 0x88, 0xb3, 0x6d, 0xd9, 0xa6, 0x6b, 0xa2, 0x4a, + 0x70, 0x73, 0x9b, 0xde, 0x94, 0x6f, 0x84, 0xb4, 0x3b, 0xf6, 0x95, 0xe5, 0x9a, 0x3b, 0x96, 0x6d, + 0x9a, 0xe7, 0x5c, 0x5f, 0xbe, 0x1e, 0xba, 0xcd, 0xfc, 0x84, 0xbd, 0x45, 0xee, 0x0a, 0xe3, 0x47, + 0xe4, 0xca, 0xbb, 0x7b, 0x63, 0xc2, 0xd6, 0xd2, 0x6c, 0x6d, 0xe0, 0xdd, 0xde, 0xbc, 0x30, 0xcd, + 0x8b, 0x3e, 0xd9, 0x61, 0xad, 0xf6, 0xf0, 0x7c, 0xc7, 0xd5, 0x07, 0xc4, 0x71, 0xb5, 0x81, 0x25, + 0x14, 0xd6, 0x2f, 0xcc, 0x0b, 0x93, 0x5d, 0xee, 0xd0, 0x2b, 0x2e, 0x55, 0x7e, 0x99, 0x87, 0x15, + 0x4c, 0x3e, 0x19, 0x12, 0xc7, 0x45, 0xbb, 0x90, 0x26, 0x9d, 0x9e, 0x59, 0x93, 0xb6, 0xa4, 0x9b, + 0x85, 0xdd, 0xeb, 0xdb, 0x63, 0x2f, 0xb7, 0x2d, 0xf4, 0x1a, 0x9d, 0x9e, 0xd9, 0x4c, 0x60, 0xa6, + 0x8b, 0x5e, 0x87, 0xcc, 0x79, 0x7f, 0xe8, 0xf4, 0x6a, 0x49, 0x66, 0x74, 0x23, 0xce, 0xe8, 0x80, + 0x2a, 0x35, 0x13, 0x98, 0x6b, 0xd3, 0x47, 0xe9, 0xc6, 0xb9, 0x59, 0x4b, 0xcd, 0x7e, 0xd4, 0xa1, + 0x71, 0xce, 0x1e, 0x45, 0x75, 0xd1, 0x3e, 0x80, 0x6e, 0xe8, 0xae, 0xda, 0xe9, 0x69, 0xba, 0x51, + 0x4b, 0x33, 0xcb, 0xe7, 0xe2, 0x2d, 0x75, 0xb7, 0x4e, 0x15, 0x9b, 0x09, 0x9c, 0xd7, 0xbd, 0x06, + 0xed, 0xee, 0x27, 0x43, 0x62, 0x5f, 0xd5, 0x32, 0xb3, 0xbb, 0xfb, 0x43, 0xaa, 0x44, 0xbb, 0xcb, + 0xb4, 0xd1, 0xb7, 0x21, 0xd7, 0xe9, 0x91, 0xce, 0x23, 0xd5, 0xbd, 0xac, 0xad, 0x30, 0xcb, 0xcd, + 0x38, 0xcb, 0x3a, 0xd5, 0x6b, 0x5d, 0x36, 0x13, 0x78, 0xa5, 0xc3, 0x2f, 0xd1, 0x5d, 0xc8, 0x76, + 0xcc, 0xc1, 0x40, 0x77, 0x6b, 0xc0, 0x6c, 0x37, 0x62, 0x6d, 0x99, 0x56, 0x33, 0x81, 0x85, 0x3e, + 0x3a, 0x86, 0x72, 0x5f, 0x77, 0x5c, 0xd5, 0x31, 0x34, 0xcb, 0xe9, 0x99, 0xae, 0x53, 0x2b, 0x30, + 0x0f, 0x2f, 0xc4, 0x79, 0x38, 0xd2, 0x1d, 0xf7, 0xcc, 0x53, 0x6e, 0x26, 0x70, 0xa9, 0x1f, 0x16, + 0x50, 0x7f, 0xe6, 0xf9, 0x39, 0xb1, 0x7d, 0x87, 0xb5, 0xe2, 0x6c, 0x7f, 0x27, 0x54, 0xdb, 0xb3, + 0xa7, 0xfe, 0xcc, 0xb0, 0x00, 0xfd, 0x18, 0xd6, 0xfa, 0xa6, 0xd6, 0xf5, 0xdd, 0xa9, 0x9d, 0xde, + 0xd0, 0x78, 0x54, 0x2b, 0x31, 0xa7, 0x2f, 0xc7, 0x76, 0xd2, 0xd4, 0xba, 0x9e, 0x8b, 0x3a, 0x35, + 0x68, 0x26, 0xf0, 0x6a, 0x7f, 0x5c, 0x88, 0x3e, 0x84, 0x75, 0xcd, 0xb2, 0xfa, 0x57, 0xe3, 0xde, + 0xcb, 0xcc, 0xfb, 0xad, 0x38, 0xef, 0x7b, 0xd4, 0x66, 0xdc, 0x3d, 0xd2, 0x26, 0xa4, 0xa8, 0x05, + 0x55, 0xcb, 0x26, 0x96, 0x66, 0x13, 0xd5, 0xb2, 0x4d, 0xcb, 0x74, 0xb4, 0x7e, 0xad, 0xc2, 0x7c, + 0xbf, 0x14, 0xe7, 0xfb, 0x94, 0xeb, 0x9f, 0x0a, 0xf5, 0x66, 0x02, 0x57, 0xac, 0xa8, 0x88, 0x7b, + 0x35, 0x3b, 0xc4, 0x71, 0x02, 0xaf, 0xd5, 0x79, 0x5e, 0x99, 0x7e, 0xd4, 0x6b, 0x44, 0x84, 0x1a, + 0x50, 0x20, 0x97, 0xd4, 0x5c, 0x1d, 0x99, 0x2e, 0xa9, 0xad, 0x32, 0x87, 0x4a, 0xec, 0x0a, 0x65, + 0xaa, 0x0f, 0x4d, 0x97, 0x34, 0x13, 0x18, 0x88, 0xdf, 0x42, 0x1a, 0x3c, 0x35, 0x22, 0xb6, 0x7e, + 0x7e, 0xc5, 0xdc, 0xa8, 0xec, 0x8e, 0xa3, 0x9b, 0x46, 0x0d, 0x31, 0x87, 0xaf, 0xc4, 0x39, 0x7c, + 0xc8, 
0x8c, 0xa8, 0x8b, 0x86, 0x67, 0xd2, 0x4c, 0xe0, 0xb5, 0xd1, 0xa4, 0x98, 0x4e, 0xb1, 0x73, + 0xdd, 0xd0, 0xfa, 0xfa, 0x63, 0xa2, 0xb6, 0xfb, 0x66, 0xe7, 0x51, 0x6d, 0x6d, 0xf6, 0x14, 0x3b, + 0x10, 0xda, 0xfb, 0x54, 0x99, 0x4e, 0xb1, 0xf3, 0xb0, 0x60, 0x7f, 0x05, 0x32, 0x23, 0xad, 0x3f, + 0x24, 0xca, 0x4b, 0x50, 0x08, 0x05, 0x20, 0x54, 0x83, 0x95, 0x01, 0x71, 0x1c, 0xed, 0x82, 0xb0, + 0x78, 0x95, 0xc7, 0x5e, 0x53, 0x29, 0x43, 0x31, 0x1c, 0x74, 0x94, 0xcf, 0x25, 0xdf, 0x92, 0xc6, + 0x13, 0x6a, 0x39, 0x22, 0x36, 0x7b, 0x6d, 0x61, 0x29, 0x9a, 0xe8, 0x79, 0x28, 0xb1, 0x2e, 0xab, + 0xde, 0x7d, 0x1a, 0xd4, 0xd2, 0xb8, 0xc8, 0x84, 0x0f, 0x85, 0xd2, 0x26, 0x14, 0xac, 0x5d, 0xcb, + 0x57, 0x49, 0x31, 0x15, 0xb0, 0x76, 0x2d, 0x4f, 0xe1, 0x39, 0x28, 0xd2, 0xf7, 0xf3, 0x35, 0xd2, + 0xec, 0x21, 0x05, 0x2a, 0x13, 0x2a, 0xca, 0x1f, 0x93, 0x50, 0x1d, 0x0f, 0x54, 0xe8, 0x2e, 0xa4, + 0x69, 0xcc, 0x16, 0xe1, 0x57, 0xde, 0xe6, 0x01, 0x7d, 0xdb, 0x0b, 0xe8, 0xdb, 0x2d, 0x2f, 0xa0, + 0xef, 0xe7, 0xbe, 0xfc, 0x7a, 0x33, 0xf1, 0xf9, 0x5f, 0x36, 0x25, 0xcc, 0x2c, 0xd0, 0x33, 0x34, + 0x3c, 0x69, 0xba, 0xa1, 0xea, 0x5d, 0xd6, 0xe5, 0x3c, 0x8d, 0x3d, 0x9a, 0x6e, 0x1c, 0x76, 0xd1, + 0x11, 0x54, 0x3b, 0xa6, 0xe1, 0x10, 0xc3, 0x19, 0x3a, 0x2a, 0xdf, 0x30, 0x44, 0xd0, 0x8d, 0x84, + 0x4e, 0xbe, 0x0d, 0xd5, 0x3d, 0xcd, 0x53, 0xa6, 0x88, 0x2b, 0x9d, 0xa8, 0x00, 0x1d, 0x00, 0x8c, + 0xb4, 0xbe, 0xde, 0xd5, 0x5c, 0xd3, 0x76, 0x6a, 0xe9, 0xad, 0xd4, 0xcd, 0xc2, 0xee, 0xd6, 0xc4, + 0x87, 0x7d, 0xe8, 0xa9, 0x3c, 0xb0, 0xba, 0x9a, 0x4b, 0xf6, 0xd3, 0xb4, 0xbb, 0x38, 0x64, 0x89, + 0x5e, 0x84, 0x8a, 0x66, 0x59, 0xaa, 0xe3, 0x6a, 0x2e, 0x51, 0xdb, 0x57, 0x2e, 0x71, 0x58, 0x40, + 0x2e, 0xe2, 0x92, 0x66, 0x59, 0x67, 0x54, 0xba, 0x4f, 0x85, 0xe8, 0x05, 0x28, 0xd3, 0xd8, 0xad, + 0x6b, 0x7d, 0xb5, 0x47, 0xf4, 0x8b, 0x9e, 0x5b, 0xcb, 0x6e, 0x49, 0x37, 0x53, 0xb8, 0x24, 0xa4, + 0x4d, 0x26, 0x54, 0xba, 0xfe, 0x17, 0x67, 0x71, 0x1b, 0x21, 0x48, 0x77, 0x35, 0x57, 0x63, 0x23, + 0x59, 0xc4, 0xec, 0x9a, 0xca, 0x2c, 0xcd, 0xed, 0x89, 0xf1, 0x61, 0xd7, 0xe8, 0x1a, 0x64, 0x85, + 0xdb, 0x14, 0x73, 0x2b, 0x5a, 0x68, 0x1d, 0x32, 0x96, 0x6d, 0x8e, 0x08, 0xfb, 0x74, 0x39, 0xcc, + 0x1b, 0x0a, 0x86, 0x72, 0x34, 0xc6, 0xa3, 0x32, 0x24, 0xdd, 0x4b, 0xf1, 0x94, 0xa4, 0x7b, 0x89, + 0xfe, 0x0f, 0xd2, 0x74, 0x20, 0xd9, 0x33, 0xca, 0x53, 0x76, 0x35, 0x61, 0xd7, 0xba, 0xb2, 0x08, + 0x66, 0x9a, 0x4a, 0x05, 0x4a, 0x91, 0xd8, 0xaf, 0x5c, 0x83, 0xf5, 0x69, 0xa1, 0x5c, 0xe9, 0xf9, + 0xf2, 0x48, 0x48, 0x46, 0xaf, 0x43, 0xce, 0x8f, 0xe5, 0x7c, 0xe2, 0x3c, 0x33, 0xf1, 0x58, 0x4f, + 0x19, 0xfb, 0xaa, 0x74, 0xc6, 0xd0, 0x0f, 0xd0, 0xd3, 0xc4, 0xce, 0x5d, 0xc4, 0x2b, 0x9a, 0x65, + 0x35, 0x35, 0xa7, 0xa7, 0x7c, 0x04, 0xb5, 0xb8, 0x38, 0x1d, 0x1a, 0x30, 0x89, 0x4d, 0x7b, 0x6f, + 0xc0, 0xae, 0x41, 0xf6, 0xdc, 0xb4, 0x07, 0x9a, 0xcb, 0x9c, 0x95, 0xb0, 0x68, 0xd1, 0x81, 0xe4, + 0x31, 0x3b, 0xc5, 0xc4, 0xbc, 0xa1, 0xa8, 0xf0, 0x4c, 0x6c, 0xac, 0xa6, 0x26, 0xba, 0xd1, 0x25, + 0x7c, 0x58, 0x4b, 0x98, 0x37, 0x02, 0x47, 0xbc, 0xb3, 0xbc, 0x41, 0x1f, 0xeb, 0xb0, 0x77, 0x65, + 0xfe, 0xf3, 0x58, 0xb4, 0x94, 0x9f, 0x49, 0x70, 0x6d, 0x7a, 0xc4, 0x46, 0x37, 0x00, 0xf8, 0x12, + 0x17, 0x13, 0x24, 0x75, 0xb3, 0x88, 0xf3, 0x4c, 0x72, 0x8f, 0xce, 0x92, 0x17, 0xa1, 0x12, 0xdc, + 0x56, 0x1d, 0xfd, 0x31, 0xff, 0x98, 0x29, 0x5c, 0xf2, 0x75, 0xce, 0xf4, 0xc7, 0x04, 0xdd, 0x86, + 0x0c, 0x8d, 0xa0, 0x74, 0x2d, 0xd1, 0x35, 0x70, 0x6d, 0x72, 0x2d, 0xd1, 0xa8, 0x88, 0xb9, 0x92, + 0xf2, 0x3d, 0x58, 0x9d, 0x88, 0xcc, 0xe8, 0x16, 0xa4, 0x59, 0x2c, 0xe7, 0x5f, 0x2d, 0xce, 0x03, + 0xd3, 0x51, 0x9a, 0x20, 0xc7, 
0x47, 0xe2, 0xa5, 0x3c, 0xb5, 0x43, 0x23, 0x13, 0xdd, 0x62, 0xde, + 0xa0, 0xdf, 0x56, 0xa3, 0x83, 0xc9, 0xfd, 0xd4, 0x26, 0xfd, 0x34, 0xd9, 0x7d, 0xb1, 0x9e, 0x85, + 0x36, 0xaa, 0x42, 0xca, 0xbd, 0x74, 0x6a, 0x49, 0x36, 0x94, 0xf4, 0x52, 0xf9, 0x4d, 0xd2, 0x9f, + 0xac, 0x91, 0xe0, 0xee, 0xa9, 0x4a, 0xbe, 0x2a, 0x5d, 0x95, 0xa1, 0x39, 0xc8, 0xae, 0x63, 0x57, + 0x65, 0xd0, 0xc1, 0xf4, 0x52, 0x1d, 0x3c, 0x81, 0x6a, 0x5f, 0x73, 0x5c, 0x95, 0x33, 0x95, 0xca, + 0xb8, 0x33, 0x13, 0x03, 0x71, 0x47, 0x9a, 0xb7, 0x12, 0xe9, 0x56, 0x21, 0x1c, 0x95, 0xfb, 0x11, + 0x29, 0xc2, 0xb0, 0xde, 0xbe, 0x7a, 0xac, 0x19, 0xae, 0x6e, 0x10, 0x35, 0x14, 0x0f, 0xb3, 0x6c, + 0x2e, 0x4c, 0xae, 0xbf, 0xc6, 0x48, 0xef, 0x12, 0xa3, 0xe3, 0x05, 0xc2, 0x35, 0xdf, 0xd8, 0x0f, + 0x94, 0x8e, 0xf2, 0x8f, 0x3c, 0xe4, 0x30, 0x71, 0x2c, 0x1a, 0x70, 0xd1, 0x3e, 0xe4, 0xc9, 0x65, + 0x87, 0x58, 0xae, 0xb7, 0x47, 0x4d, 0xdf, 0xeb, 0xb9, 0x76, 0xc3, 0xd3, 0xa4, 0xa4, 0xeb, 0x9b, + 0xa1, 0x3b, 0x02, 0xe6, 0xe3, 0xb9, 0x5c, 0x98, 0x87, 0x69, 0xfe, 0x0d, 0x8f, 0xe6, 0x53, 0xb1, + 0xa0, 0xca, 0xad, 0xc6, 0x70, 0xfe, 0x8e, 0xc0, 0xf9, 0xf4, 0x9c, 0x87, 0x45, 0x78, 0xbe, 0x1e, + 0xe1, 0xf9, 0xcc, 0x9c, 0xd7, 0x8c, 0x01, 0xfa, 0x37, 0x3c, 0xa0, 0xcf, 0xce, 0xe9, 0xf1, 0x18, + 0xd1, 0x7f, 0x27, 0x44, 0xf4, 0x39, 0x66, 0xba, 0x15, 0x6b, 0x3a, 0x05, 0xe9, 0xdf, 0xf4, 0x91, + 0xbe, 0x10, 0x9b, 0x0e, 0x08, 0xe3, 0x71, 0xa6, 0x3f, 0x99, 0x60, 0x7a, 0xce, 0xe0, 0x2f, 0xc6, + 0xba, 0x98, 0x03, 0xf5, 0x27, 0x13, 0x50, 0x5f, 0x9a, 0xe3, 0x70, 0x0e, 0xd5, 0xbf, 0x3f, 0x9d, + 0xea, 0xe3, 0xb9, 0x5b, 0x74, 0x73, 0x31, 0xac, 0x57, 0x63, 0xb0, 0xbe, 0x12, 0x8b, 0xa0, 0xdc, + 0xfd, 0xc2, 0x5c, 0xff, 0x60, 0x0a, 0xd7, 0x73, 0x02, 0xbf, 0x19, 0xeb, 0x7c, 0x01, 0xb0, 0x7f, + 0x30, 0x05, 0xec, 0x57, 0xe7, 0xba, 0x9d, 0x4b, 0xf6, 0x07, 0x51, 0xb2, 0xe7, 0x20, 0xfe, 0xfc, + 0x8c, 0xd5, 0x1e, 0x83, 0xf6, 0xed, 0x38, 0xb4, 0xe7, 0xf8, 0x7d, 0x3b, 0xd6, 0xe3, 0x12, 0x6c, + 0x7f, 0x32, 0xc1, 0xf6, 0xeb, 0x73, 0x66, 0xda, 0xa2, 0x70, 0xff, 0x32, 0xdd, 0x21, 0xc7, 0xe2, + 0x19, 0xdd, 0xf4, 0x89, 0x6d, 0x9b, 0xb6, 0xc0, 0x74, 0xde, 0x50, 0x6e, 0x52, 0xd8, 0x0b, 0x62, + 0xd7, 0x8c, 0x44, 0x80, 0xc1, 0x55, 0x28, 0x5e, 0x29, 0xbf, 0x95, 0x02, 0x5b, 0x16, 0xc9, 0xc3, + 0xa0, 0x98, 0x17, 0xa0, 0x18, 0x4a, 0x0f, 0x92, 0xd1, 0xf4, 0x60, 0x13, 0x0a, 0x14, 0x9a, 0xc6, + 0xc8, 0x5f, 0xb3, 0x7c, 0xf2, 0xbf, 0x05, 0xab, 0x6c, 0xa7, 0xe1, 0x08, 0x21, 0x36, 0xb1, 0x34, + 0xdb, 0xc4, 0x2a, 0xf4, 0x06, 0x1f, 0x05, 0xbe, 0x9b, 0xbd, 0x0a, 0x6b, 0x21, 0x5d, 0x1f, 0xc6, + 0x38, 0x06, 0x57, 0x7d, 0xed, 0x3d, 0x41, 0x65, 0xbf, 0x97, 0x82, 0x11, 0x0a, 0x52, 0x86, 0x69, + 0x74, 0x2f, 0xfd, 0x97, 0xe8, 0x3e, 0xf9, 0x6f, 0xd3, 0x7d, 0x18, 0x2e, 0x53, 0x51, 0xb8, 0xfc, + 0xbb, 0x14, 0x7c, 0x13, 0x9f, 0xd5, 0x3b, 0x66, 0x97, 0x08, 0xdc, 0x63, 0xd7, 0x94, 0x13, 0xfa, + 0xe6, 0x85, 0x80, 0x3a, 0x7a, 0x49, 0xb5, 0xfc, 0x0d, 0x26, 0x2f, 0xf6, 0x0f, 0x9f, 0x14, 0x33, + 0x6c, 0x84, 0x05, 0x29, 0x56, 0x21, 0xf5, 0x88, 0xf0, 0xed, 0xa0, 0x88, 0xe9, 0x25, 0xd5, 0x63, + 0x93, 0x8c, 0x55, 0x6e, 0x8a, 0x98, 0x37, 0xd0, 0x5d, 0xc8, 0xb3, 0xca, 0x9c, 0x6a, 0x5a, 0x8e, + 0xd8, 0x01, 0x9e, 0x0d, 0xbf, 0x2b, 0x2f, 0xc0, 0x6d, 0x9f, 0x52, 0x9d, 0x13, 0xcb, 0xc1, 0x39, + 0x4b, 0x5c, 0x85, 0xf8, 0x24, 0x1f, 0xe1, 0x93, 0xeb, 0x90, 0xa7, 0xbd, 0x77, 0x2c, 0xad, 0x43, + 0x58, 0xa5, 0x27, 0x8f, 0x03, 0x81, 0xf2, 0xe7, 0x24, 0x54, 0xc6, 0x36, 0x94, 0xa9, 0xef, 0xee, + 0x4d, 0xc9, 0x64, 0x28, 0x77, 0x59, 0x6c, 0x3c, 0x36, 0x00, 0x2e, 0x34, 0x47, 0xfd, 0x54, 0x33, + 0x5c, 0xd2, 0x15, 0x83, 0x12, 0x92, 0x20, 0x19, 0x72, 
0xb4, 0x35, 0x74, 0x48, 0x57, 0xa4, 0x51, + 0x7e, 0x1b, 0x35, 0x21, 0x4b, 0x46, 0xc4, 0x70, 0x9d, 0xda, 0xca, 0x24, 0xd0, 0x0a, 0x88, 0x21, + 0x86, 0xbb, 0x5f, 0xa3, 0x1f, 0xfb, 0x6f, 0x5f, 0x6f, 0x56, 0xb9, 0xf6, 0x6d, 0x73, 0xa0, 0xbb, + 0x64, 0x60, 0xb9, 0x57, 0x58, 0xd8, 0x47, 0x47, 0x21, 0x37, 0x36, 0x0a, 0x21, 0x62, 0xcf, 0x87, + 0x89, 0x9d, 0xf6, 0xcd, 0xb2, 0x75, 0xd3, 0xd6, 0xdd, 0x2b, 0x36, 0x74, 0x29, 0xec, 0xb7, 0x69, + 0x56, 0x3e, 0x20, 0x03, 0xcb, 0x34, 0xfb, 0x2a, 0x0f, 0x07, 0x05, 0x66, 0x5a, 0x14, 0xc2, 0x06, + 0x8b, 0x0a, 0x3f, 0x4d, 0x06, 0xeb, 0xe3, 0x1e, 0xe9, 0xeb, 0x23, 0x62, 0xff, 0x2f, 0x0e, 0xb0, + 0x72, 0x48, 0x93, 0xd4, 0x30, 0x79, 0x4c, 0x7d, 0xdf, 0xe7, 0xa1, 0x64, 0x13, 0x57, 0xd3, 0x0d, + 0x35, 0x42, 0xda, 0x45, 0x2e, 0x14, 0x59, 0xf5, 0x29, 0x3c, 0x35, 0x95, 0x40, 0xd0, 0xb7, 0x20, + 0x1f, 0xc0, 0x8b, 0x14, 0x03, 0xbd, 0x7e, 0xd2, 0x19, 0xe8, 0x2a, 0xbf, 0x93, 0x02, 0x97, 0xd1, + 0x34, 0xb6, 0x01, 0x59, 0x9b, 0x38, 0xc3, 0x3e, 0x4f, 0x2c, 0xcb, 0xbb, 0xaf, 0x2e, 0xc6, 0x2e, + 0x54, 0x3a, 0xec, 0xbb, 0x58, 0x18, 0x2b, 0x1f, 0x42, 0x96, 0x4b, 0x50, 0x01, 0x56, 0x1e, 0x1c, + 0xdf, 0x3f, 0x3e, 0x79, 0xf7, 0xb8, 0x9a, 0x40, 0x00, 0xd9, 0xbd, 0x7a, 0xbd, 0x71, 0xda, 0xaa, + 0x4a, 0x28, 0x0f, 0x99, 0xbd, 0xfd, 0x13, 0xdc, 0xaa, 0x26, 0xa9, 0x18, 0x37, 0xde, 0x69, 0xd4, + 0x5b, 0xd5, 0x14, 0x5a, 0x85, 0x12, 0xbf, 0x56, 0x0f, 0x4e, 0xf0, 0x0f, 0xf6, 0x5a, 0xd5, 0x74, + 0x48, 0x74, 0xd6, 0x38, 0xbe, 0xd7, 0xc0, 0xd5, 0x8c, 0xf2, 0x1a, 0xcd, 0x5c, 0x63, 0x68, 0x27, + 0xc8, 0x51, 0xa5, 0x50, 0x8e, 0xaa, 0xfc, 0x3c, 0x49, 0x73, 0xb7, 0x38, 0x84, 0x41, 0xef, 0x8c, + 0xbd, 0xf8, 0xee, 0x12, 0xfc, 0x33, 0xf6, 0xf6, 0xe8, 0x05, 0x28, 0xdb, 0xe4, 0x9c, 0xb8, 0x9d, + 0x1e, 0x47, 0x2a, 0x1e, 0xc3, 0x4b, 0xb8, 0x24, 0xa4, 0xcc, 0xc8, 0xe1, 0x6a, 0x1f, 0x93, 0x8e, + 0xab, 0xf2, 0xc5, 0xc7, 0x93, 0xd8, 0x3c, 0x55, 0xa3, 0xd2, 0x33, 0x2e, 0x54, 0x3e, 0x5a, 0x6a, + 0x2c, 0xf3, 0x90, 0xc1, 0x8d, 0x16, 0x7e, 0xaf, 0x9a, 0x42, 0x08, 0xca, 0xec, 0x52, 0x3d, 0x3b, + 0xde, 0x3b, 0x3d, 0x6b, 0x9e, 0xd0, 0xb1, 0x5c, 0x83, 0x8a, 0x37, 0x96, 0x9e, 0x30, 0xa3, 0xdc, + 0x85, 0xa7, 0x63, 0xf8, 0x6b, 0x4e, 0x9a, 0xae, 0xbc, 0x0f, 0x68, 0x12, 0x88, 0xd0, 0x01, 0x94, + 0xc7, 0xd8, 0x47, 0x9a, 0x84, 0xf3, 0x20, 0x23, 0xf6, 0xb9, 0x06, 0x97, 0x46, 0xe1, 0xa6, 0xf2, + 0x0b, 0x09, 0x9e, 0x9d, 0x41, 0x47, 0xe8, 0xfe, 0xd8, 0x37, 0xbb, 0xb3, 0x0c, 0x5b, 0x8d, 0x4f, + 0xd9, 0xbb, 0x0b, 0x0d, 0xf3, 0xd9, 0xd1, 0xde, 0x59, 0x33, 0x3a, 0x65, 0x95, 0x5f, 0x4b, 0xe1, + 0xf1, 0x8b, 0x52, 0xe5, 0xdb, 0x63, 0x5d, 0xdc, 0x59, 0x14, 0x51, 0xc7, 0xe7, 0x94, 0x0c, 0x39, + 0x22, 0xd2, 0x57, 0x91, 0xe2, 0xfb, 0x6d, 0xe5, 0xd5, 0xf9, 0x5d, 0x0f, 0xfa, 0x9b, 0x54, 0xfe, + 0x90, 0x0c, 0x56, 0x7f, 0xb4, 0x2e, 0xf0, 0xff, 0x41, 0x5d, 0x60, 0x56, 0x0a, 0xe8, 0xc7, 0x75, + 0x5e, 0x3b, 0x38, 0x83, 0x55, 0x1f, 0x3a, 0xd4, 0x21, 0x83, 0x91, 0x65, 0xa9, 0xa5, 0x3a, 0x8a, + 0x8a, 0x1d, 0xf4, 0x1e, 0x3c, 0x3d, 0x46, 0x54, 0xbe, 0xeb, 0x85, 0xcb, 0xa6, 0x4f, 0x45, 0xc1, + 0xca, 0x73, 0x1d, 0x6c, 0x01, 0xe9, 0xff, 0x6c, 0x0b, 0x50, 0x3e, 0x80, 0x72, 0xb4, 0x50, 0x41, + 0x63, 0x8f, 0x6d, 0x0e, 0x8d, 0x2e, 0xfb, 0xdc, 0x19, 0xcc, 0x1b, 0xe8, 0x75, 0xaf, 0x4a, 0x95, + 0x8c, 0x09, 0xd2, 0x74, 0x66, 0x86, 0x0a, 0x1d, 0xa2, 0x5c, 0xf5, 0x18, 0x32, 0xac, 0x27, 0x74, + 0xeb, 0x60, 0xf5, 0x4c, 0x81, 0xc7, 0xf4, 0x1a, 0x7d, 0x00, 0xa0, 0xb9, 0xae, 0xad, 0xb7, 0x87, + 0x81, 0xe3, 0xcd, 0xe9, 0x6f, 0xb2, 0xe7, 0xe9, 0xed, 0x5f, 0x17, 0xaf, 0xb4, 0x1e, 0x98, 0x86, + 0x5e, 0x2b, 0xe4, 0x50, 0x39, 0x86, 0x72, 0xd4, 0xd6, 0x03, 0x3a, 0xde, 0x87, 
0x28, 0xd0, 0x71, + 0x3e, 0x17, 0x40, 0xe7, 0xe3, 0x60, 0x8a, 0x17, 0x6d, 0x59, 0x43, 0xf9, 0x4c, 0x82, 0x5c, 0xeb, + 0x52, 0x4c, 0xd3, 0x68, 0xf9, 0x32, 0x52, 0xef, 0xe5, 0xa6, 0xc9, 0x70, 0xcd, 0x91, 0x57, 0x77, + 0x53, 0x7e, 0x75, 0xf7, 0xfb, 0xfe, 0x9a, 0x4a, 0xcf, 0xa9, 0x55, 0xf8, 0x13, 0xd5, 0xab, 0x44, + 0x89, 0xb5, 0xfe, 0x16, 0xe4, 0xfd, 0x79, 0x48, 0xf3, 0x0c, 0xad, 0xdb, 0xb5, 0x89, 0xe3, 0x88, + 0x0d, 0xc3, 0x6b, 0xb2, 0xf2, 0xb3, 0xf9, 0xa9, 0xa8, 0x6a, 0xa6, 0x30, 0x6f, 0x28, 0x5d, 0xa8, + 0x8c, 0x4d, 0x62, 0xf4, 0x16, 0xac, 0x58, 0xc3, 0xb6, 0xea, 0x0d, 0xcf, 0xd8, 0x8f, 0x54, 0x8f, + 0x60, 0x87, 0xed, 0xbe, 0xde, 0xb9, 0x4f, 0xae, 0xbc, 0xce, 0x58, 0xc3, 0xf6, 0x7d, 0x3e, 0x8a, + 0xfc, 0x29, 0xc9, 0xf0, 0x53, 0x46, 0x90, 0xf3, 0x26, 0x05, 0xfa, 0x2e, 0xe4, 0xfd, 0xf5, 0xe1, + 0xff, 0x95, 0x88, 0x5d, 0x58, 0xc2, 0x7d, 0x60, 0x42, 0xd3, 0x21, 0x47, 0xbf, 0x30, 0x48, 0x57, + 0x0d, 0x32, 0x1d, 0xf6, 0xb4, 0x1c, 0xae, 0xf0, 0x1b, 0x47, 0x5e, 0x9a, 0xa3, 0xfc, 0x53, 0x82, + 0x9c, 0x57, 0x27, 0x43, 0xaf, 0x85, 0xe6, 0x5d, 0x79, 0x4a, 0x39, 0xc9, 0x53, 0x0c, 0x0a, 0xe9, + 0xd1, 0xbe, 0x26, 0x97, 0xef, 0x6b, 0x5c, 0xd1, 0xd1, 0xfb, 0x29, 0x93, 0x5e, 0xfa, 0xa7, 0xcc, + 0x6d, 0x40, 0xae, 0xe9, 0x6a, 0x7d, 0x9a, 0x8f, 0xeb, 0xc6, 0x85, 0xca, 0x07, 0x9b, 0x53, 0x63, + 0x95, 0xdd, 0x79, 0xc8, 0x6e, 0x9c, 0xb2, 0x71, 0xff, 0x89, 0x04, 0x39, 0x9f, 0x86, 0x96, 0x2d, + 0xb3, 0x5f, 0x83, 0xac, 0xd8, 0xf0, 0x79, 0x9d, 0x5d, 0xb4, 0xfc, 0xea, 0x6a, 0x3a, 0x54, 0x5d, + 0x95, 0x21, 0x37, 0x20, 0xae, 0xc6, 0xf6, 0x55, 0x9e, 0x6c, 0xfa, 0xed, 0x5b, 0x6f, 0x42, 0x21, + 0xf4, 0x8b, 0x82, 0xae, 0xbc, 0xe3, 0xc6, 0xbb, 0xd5, 0x84, 0xbc, 0xf2, 0xd9, 0x17, 0x5b, 0xa9, + 0x63, 0xf2, 0x29, 0x9d, 0xb3, 0xb8, 0x51, 0x6f, 0x36, 0xea, 0xf7, 0xab, 0x92, 0x5c, 0xf8, 0xec, + 0x8b, 0xad, 0x15, 0x4c, 0x58, 0x49, 0xec, 0x56, 0x13, 0x8a, 0xe1, 0xaf, 0x12, 0xdd, 0x11, 0x10, + 0x94, 0xef, 0x3d, 0x38, 0x3d, 0x3a, 0xac, 0xef, 0xb5, 0x1a, 0xea, 0xc3, 0x93, 0x56, 0xa3, 0x2a, + 0xa1, 0xa7, 0x61, 0xed, 0xe8, 0xf0, 0xed, 0x66, 0x4b, 0xad, 0x1f, 0x1d, 0x36, 0x8e, 0x5b, 0xea, + 0x5e, 0xab, 0xb5, 0x57, 0xbf, 0x5f, 0x4d, 0xee, 0xfe, 0xaa, 0x00, 0x95, 0xbd, 0xfd, 0xfa, 0x21, + 0xe5, 0x1d, 0xbd, 0xa3, 0xb1, 0x4a, 0x40, 0x1d, 0xd2, 0x2c, 0xd7, 0x9f, 0x79, 0x26, 0x41, 0x9e, + 0x5d, 0xe4, 0x44, 0x07, 0x90, 0x61, 0x65, 0x00, 0x34, 0xfb, 0x90, 0x82, 0x3c, 0xa7, 0xea, 0x49, + 0x3b, 0xc3, 0x96, 0xc7, 0xcc, 0x53, 0x0b, 0xf2, 0xec, 0x22, 0x28, 0x3a, 0x82, 0x15, 0x2f, 0x0b, + 0x9c, 0x77, 0x94, 0x40, 0x9e, 0x5b, 0x99, 0xa4, 0xaf, 0xc6, 0xb3, 0xe9, 0xd9, 0x07, 0x1a, 0xe4, + 0x39, 0xe5, 0x51, 0x74, 0x08, 0x59, 0x91, 0x35, 0xcc, 0x39, 0xa3, 0x20, 0xcf, 0x2b, 0x78, 0x22, + 0x0c, 0xf9, 0xa0, 0x4e, 0x31, 0xff, 0x98, 0x86, 0xbc, 0x40, 0xe5, 0x17, 0x7d, 0x08, 0xa5, 0x68, + 0x26, 0xb2, 0xd8, 0x39, 0x08, 0x79, 0xc1, 0xd2, 0x2a, 0xf5, 0x1f, 0x4d, 0x4b, 0x16, 0x3b, 0x17, + 0x21, 0x2f, 0x58, 0x69, 0x45, 0x1f, 0xc3, 0xea, 0x64, 0xda, 0xb0, 0xf8, 0x31, 0x09, 0x79, 0x89, + 0xda, 0x2b, 0x1a, 0x00, 0x9a, 0x92, 0x6e, 0x2c, 0x71, 0x6a, 0x42, 0x5e, 0xa6, 0x14, 0x8b, 0xba, + 0x50, 0x19, 0x67, 0xf8, 0x45, 0x4f, 0x51, 0xc8, 0x0b, 0x97, 0x65, 0xf9, 0x53, 0xa2, 0xa4, 0xbb, + 0xe8, 0xa9, 0x0a, 0x79, 0xe1, 0x2a, 0x2d, 0x7a, 0x00, 0x10, 0xca, 0x2a, 0x16, 0x38, 0x65, 0x21, + 0x2f, 0x52, 0xaf, 0x45, 0x16, 0xac, 0x4d, 0xcb, 0x26, 0x96, 0x39, 0x74, 0x21, 0x2f, 0x55, 0xc6, + 0xa5, 0xf3, 0x39, 0x0a, 0xda, 0x8b, 0x1d, 0xc2, 0x90, 0x17, 0xac, 0xe7, 0xee, 0x37, 0xbe, 0xfc, + 0x66, 0x43, 0xfa, 0xea, 0x9b, 0x0d, 0xe9, 0xaf, 0xdf, 0x6c, 0x48, 0x9f, 0x3f, 0xd9, 0x48, 0x7c, + 0xf5, 
0x64, 0x23, 0xf1, 0xa7, 0x27, 0x1b, 0x89, 0x1f, 0xbd, 0x72, 0xa1, 0xbb, 0xbd, 0x61, 0x7b, + 0xbb, 0x63, 0x0e, 0x76, 0xc2, 0xe7, 0xd6, 0xa6, 0x9d, 0xa5, 0x6b, 0x67, 0xd9, 0x6e, 0x7a, 0xe7, + 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x59, 0x1d, 0x83, 0x17, 0x6b, 0x27, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -3727,13 +3569,13 @@ type ABCIApplicationClient interface { Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) - DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) + // rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) - BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) - EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) + // rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); + // rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) @@ -3742,6 +3584,7 @@ type ABCIApplicationClient interface { ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) } type aBCIApplicationClient struct { @@ -3779,15 +3622,6 @@ func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts return out, nil } -func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { - out := new(ResponseDeliverTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/DeliverTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { out := new(ResponseCheckTx) err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/CheckTx", in, out, opts...) 
@@ -3824,24 +3658,6 @@ func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitCh return out, nil } -func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { - out := new(ResponseBeginBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/BeginBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { - out := new(ResponseEndBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/EndBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { out := new(ResponseListSnapshots) err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ListSnapshots", in, out, opts...) @@ -3914,18 +3730,27 @@ func (c *aBCIApplicationClient) VerifyVoteExtension(ctx context.Context, in *Req return out, nil } +func (c *aBCIApplicationClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { + out := new(ResponseFinalizeBlock) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/FinalizeBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ABCIApplicationServer is the server API for ABCIApplication service. type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) Flush(context.Context, *RequestFlush) (*ResponseFlush, error) Info(context.Context, *RequestInfo) (*ResponseInfo, error) - DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) + // rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) Query(context.Context, *RequestQuery) (*ResponseQuery, error) Commit(context.Context, *RequestCommit) (*ResponseCommit, error) InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) - BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) - EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) + // rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); + // rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) @@ -3934,6 +3759,7 @@ type ABCIApplicationServer interface { ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
@@ -3949,9 +3775,6 @@ func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *Reque func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") } -func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") -} func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") } @@ -3964,12 +3787,6 @@ func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *Requ func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") } -func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method BeginBlock not implemented") -} -func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") -} func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") } @@ -3994,6 +3811,9 @@ func (*UnimplementedABCIApplicationServer) ExtendVote(ctx context.Context, req * func (*UnimplementedABCIApplicationServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") } +func (*UnimplementedABCIApplicationServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") +} func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) @@ -4053,24 +3873,6 @@ func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestDeliverTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).DeliverTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/DeliverTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) - } - return interceptor(ctx, in, info, handler) -} - func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RequestCheckTx) if err := dec(in); err != nil { @@ -4143,42 +3945,6 @@ func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, 
de return interceptor(ctx, in, info, handler) } -func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBeginBlock) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).BeginBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/BeginBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEndBlock) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).EndBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/EndBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) - } - return interceptor(ctx, in, info, handler) -} - func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RequestListSnapshots) if err := dec(in); err != nil { @@ -4323,6 +4089,24 @@ func _ABCIApplication_VerifyVoteExtension_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _ABCIApplication_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFinalizeBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/FinalizeBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) + } + return interceptor(ctx, in, info, handler) +} + var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.abci.ABCIApplication", HandlerType: (*ABCIApplicationServer)(nil), @@ -4339,10 +4123,6 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "Info", Handler: _ABCIApplication_Info_Handler, }, - { - MethodName: "DeliverTx", - Handler: _ABCIApplication_DeliverTx_Handler, - }, { MethodName: "CheckTx", Handler: _ABCIApplication_CheckTx_Handler, @@ -4359,14 +4139,6 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "InitChain", Handler: _ABCIApplication_InitChain_Handler, }, - { - MethodName: "BeginBlock", - Handler: _ABCIApplication_BeginBlock_Handler, - }, - { - MethodName: "EndBlock", - Handler: _ABCIApplication_EndBlock_Handler, - }, { MethodName: "ListSnapshots", Handler: _ABCIApplication_ListSnapshots_Handler, @@ -4399,6 +4171,10 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "VerifyVoteExtension", Handler: _ABCIApplication_VerifyVoteExtension_Handler, }, + { + MethodName: "FinalizeBlock", + Handler: _ABCIApplication_FinalizeBlock_Handler, + }, }, 
Streams: []grpc.StreamDesc{}, Metadata: "tendermint/abci/types.proto", @@ -4541,35 +4317,14 @@ func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CheckTx != nil { + if m.CheckTx != nil { { size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -4583,48 +4338,6 @@ func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil -} -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -4820,6 +4533,29 @@ func (m *Request_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, er } return len(dAtA) - i, nil } +func (m *Request_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} func (m *RequestEcho) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4985,12 +4721,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err20 != nil { - return 0, err20 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err18 
!= nil { + return 0, err18 } - i -= n20 - i = encodeVarintTypes(dAtA, i, uint64(n20)) + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -5048,70 +4784,6 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5147,64 +4819,6 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5393,6 +5007,20 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if m.BlockDataSize != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.BlockDataSize)) i-- @@ -5522,7 +5150,7 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *Response) Marshal() (dAtA []byte, err error) { +func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5532,101 +5160,116 @@ func (m *Response) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Response) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } } - return len(dAtA) - i, nil -} - -func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Exception != nil { - { - size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + { + size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - return len(dAtA) - i, nil -} -func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Echo != nil { - { - size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- dAtA[i] = 0x12 } + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) 
MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Flush != nil { + _ = i + var l int + _ = l + if m.Value != nil { { - size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a } return len(dAtA) - i, nil } -func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Info != nil { + if m.Exception != nil { { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5634,20 +5277,20 @@ func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.InitChain != nil { + if m.Echo != nil { { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5655,20 +5298,20 @@ func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x12 } return len(dAtA) - i, nil } -func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Query != nil { + if m.Flush != nil { { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5676,20 +5319,20 @@ func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x1a } return len(dAtA) - i, nil } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.BeginBlock != nil { + if m.Info != nil { { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5697,20 +5340,20 @@ func (m *Response_BeginBlock) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0x22 } return len(dAtA) - i, nil } -func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.CheckTx != nil { + if m.InitChain != nil { { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5718,20 +5361,20 @@ func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x2a } return len(dAtA) - i, nil } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.DeliverTx != nil { + if m.Query != nil { { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5739,20 +5382,20 @@ func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x4a + dAtA[i] = 0x32 } return len(dAtA) - i, nil } -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.EndBlock != nil { + if m.CheckTx != nil { { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5760,7 +5403,7 @@ func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x52 + dAtA[i] = 0x42 } return len(dAtA) - i, nil } @@ -5961,6 +5604,29 @@ func (m *Response_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, e } return len(dAtA) - i, nil } +func (m *Response_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} func (m *ResponseException) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6239,43 +5905,6 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil 
{ - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6455,69 +6084,6 @@ func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ValidatorUpdates) > 0 { - for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6678,20 +6244,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA50 := make([]byte, len(m.RefetchChunks)*10) - var j49 int + dAtA45 := make([]byte, len(m.RefetchChunks)*10) + var j44 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA50[j49] = uint8(uint64(num)&0x7f | 0x80) + dAtA45[j44] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j49++ + j44++ } - dAtA50[j49] = uint8(num) - j49++ + dAtA45[j44] = uint8(num) + j44++ } - i -= j49 - copy(dAtA[i:], dAtA50[:j49]) - i = encodeVarintTypes(dAtA, i, uint64(j49)) + i -= j44 + copy(dAtA[i:], dAtA45[:j44]) + i = encodeVarintTypes(dAtA, i, uint64(j44)) i-- dAtA[i] = 0x12 } @@ -6835,7 +6401,7 @@ func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { +func (m *ResponseFinalizeBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6845,20 +6411,20 @@ func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { +func 
(m *ResponseFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Votes) > 0 { - for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6866,13 +6432,90 @@ func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } } - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x1a + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Txs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -7159,12 +6802,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n55, err55 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err55 != nil { - return 0, err55 + n51, err51 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err51 != nil { + return 0, err51 } - i -= n55 - i = encodeVarintTypes(dAtA, i, uint64(n55)) + i -= n51 + i = encodeVarintTypes(dAtA, i, uint64(n51)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -7325,18 +6968,6 @@ func (m *Request_Query) Size() (n int) { } return n } -func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m 
*Request_CheckTx) Size() (n int) { if m == nil { return 0 @@ -7349,30 +6980,6 @@ func (m *Request_CheckTx) Size() (n int) { } return n } -func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_Commit) Size() (n int) { if m == nil { return 0 @@ -7481,6 +7088,18 @@ func (m *Request_VerifyVoteExtension) Size() (n int) { } return n } +func (m *Request_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *RequestEcho) Size() (n int) { if m == nil { return 0 @@ -7581,29 +7200,6 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *RequestCheckTx) Size() (n int) { if m == nil { return 0 @@ -7620,31 +7216,6 @@ func (m *RequestCheckTx) Size() (n int) { return n } -func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - return n -} - func (m *RequestCommit) Size() (n int) { if m == nil { return 0 @@ -7733,6 +7304,12 @@ func (m *RequestPrepareProposal) Size() (n int) { if m.BlockDataSize != 0 { n += 1 + sovTypes(uint64(m.BlockDataSize)) } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } @@ -7779,6 +7356,38 @@ func (m *RequestProcessProposal) Size() (n int) { return n } +func (m *RequestFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.LastCommitInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *Response) Size() (n int) { if m == nil { return 0 @@ -7863,18 +7472,6 @@ func (m *Response_Query) Size() (n int) { } return n } -func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_CheckTx) Size() (n int) { if m == nil { return 0 @@ -7887,30 +7484,6 @@ func (m *Response_CheckTx) Size() (n int) { } return n } -func (m *Response_DeliverTx) Size() (n int) { - if 
m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_Commit) Size() (n int) { if m == nil { return 0 @@ -8019,6 +7592,18 @@ func (m *Response_VerifyVoteExtension) Size() (n int) { } return n } +func (m *Response_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *ResponseException) Size() (n int) { if m == nil { return 0 @@ -8146,21 +7731,6 @@ func (m *ResponseQuery) Size() (n int) { return n } -func (m *ResponseBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseCheckTx) Size() (n int) { if m == nil { return 0 @@ -8252,43 +7822,18 @@ func (m *ResponseDeliverTx) Size() (n int) { return n } -func (m *ResponseEndBlock) Size() (n int) { +func (m *ResponseCommit) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ValidatorUpdates) > 0 { - for _, e := range m.ValidatorUpdates { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() + l = len(m.Data) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - -func (m *ResponseCommit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) } return n } @@ -8416,6 +7961,37 @@ func (m *ResponseProcessProposal) Size() (n int) { return n } +func (m *ResponseFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, e := range m.Txs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *LastCommitInfo) Size() (n int) { if m == nil { return 0 @@ -8796,41 +8372,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Query{v} iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestBeginBlock{} - if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_BeginBlock{v} - iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) @@ -8866,76 +8407,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_CheckTx{v} iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestDeliverTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_DeliverTx{v} - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestEndBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_EndBlock{v} - iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) @@ -9251,6 +8722,41 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_VerifyVoteExtension{v} iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestFinalizeBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_FinalizeBlock{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9527,407 +9033,14 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AbciVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInitChain) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConsensusParams == nil { - m.ConsensusParams = &types1.ConsensusParams{} - } - if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Validators = append(m.Validators, ValidatorUpdate{}) - if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) - if m.AppStateBytes == nil { - m.AppStateBytes = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) - } - m.InitialHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InitialHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if postIndex < 0 { + return ErrInvalidLengthTypes } - m.Prove = bool(v != 0) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AbciVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9949,7 +9062,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9972,17 +9085,17 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9992,31 +9105,30 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10026,28 +9138,27 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ChainId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10074,13 +9185,16 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConsensusParams == nil { + m.ConsensusParams = &types1.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10107,11 +9221,64 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Validators = append(m.Validators, ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -10133,7 +9300,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10156,15 +9323,15 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -10191,16 +9358,48 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Type = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10210,11 +9409,31 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -10236,7 +9455,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10259,10 +9478,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10299,61 +9518,11 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10363,7 +9532,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Type |= CheckTxType(b&0x7F) << shift if b < 0x80 { break } @@ -10931,6 +10100,40 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, &types1.Vote{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11239,7 +10442,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *Response) Unmarshal(dAtA []byte) error { +func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11262,15 +10465,100 @@ func (m *Response) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Response: wiretype end group for non-group") + return fmt.Errorf("proto: RequestFinalizeBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11297,15 +10585,13 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseException{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Exception{v} iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11332,15 +10618,13 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEcho{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Echo{v} iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11367,15 +10651,64 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseFlush{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Flush{v} iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) } var msglen int for 
shift := uint(0); ; shift += 7 { @@ -11402,15 +10735,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInfo{} + v := &ResponseException{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Info{v} + m.Value = &Response_Exception{v} iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11437,15 +10770,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} + v := &ResponseEcho{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_InitChain{v} + m.Value = &Response_Echo{v} iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11472,15 +10805,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} + v := &ResponseFlush{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Query{v} + m.Value = &Response_Flush{v} iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11507,15 +10840,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseBeginBlock{} + v := &ResponseInfo{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_BeginBlock{v} + m.Value = &Response_Info{v} iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11542,15 +10875,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseInitChain{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_InitChain{v} iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11577,15 +10910,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_Query{v} iNdEx = postIndex - case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11612,11 +10945,11 @@ func (m *Response) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_CheckTx{v} iNdEx = postIndex case 11: if wireType != 2 { @@ -11863,9 +11196,44 @@ func (m *Response) Unmarshal(dAtA []byte) error { } m.Value = &Response_ProcessProposal{v} iNdEx = postIndex - case 18: + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ExtendVote{v} + iNdEx = postIndex + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11892,15 +11260,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseExtendVote{} + v := &ResponseVerifyVoteExtension{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_ExtendVote{v} + m.Value = &Response_VerifyVoteExtension{v} iNdEx = postIndex - case 19: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11927,11 +11295,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseVerifyVoteExtension{} + v := &ResponseFinalizeBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_VerifyVoteExtension{v} + m.Value = &Response_FinalizeBlock{v} iNdEx = postIndex default: iNdEx = preIndex @@ -12815,90 +12183,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -13391,173 +12675,19 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) - } - m.GasWanted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Codespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + if postIndex < 0 { + return ErrInvalidLengthTypes } - var msglen int + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13567,29 +12697,33 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.GasWanted |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) } - m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) - if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13616,18 +12750,16 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} - } - if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13637,25 +12769,23 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -14533,6 +13663,194 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseFinalizeBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, &ResponseDeliverTx{}) + if err := m.Txs[len(m.Txs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/cmd/tendermint/commands/completion.go b/cmd/tendermint/commands/completion.go new file mode 100644 index 000000000..d2c81f0af --- /dev/null +++ b/cmd/tendermint/commands/completion.go @@ -0,0 +1,46 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// NewCompletionCmd returns a cobra.Command that generates bash and zsh +// completion scripts for the given root command. If hidden is true, the +// command will not show up in the root command's list of available commands. +func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command { + flagZsh := "zsh" + cmd := &cobra.Command{ + Use: "completion", + Short: "Generate shell completion scripts", + Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT. + +Once saved to file, a completion script can be loaded in the shell's +current session as shown: + + $ . <(%s completion) + +To configure your bash shell to load completions for each session add to +your $HOME/.bashrc or $HOME/.profile the following instruction: + + . 
<(%s completion) +`, rootCmd.Use, rootCmd.Use), + RunE: func(cmd *cobra.Command, _ []string) error { + zsh, err := cmd.Flags().GetBool(flagZsh) + if err != nil { + return err + } + if zsh { + return rootCmd.GenZshCompletion(cmd.OutOrStdout()) + } + return rootCmd.GenBashCompletion(cmd.OutOrStdout()) + }, + Hidden: hidden, + Args: cobra.NoArgs, + } + + cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script") + + return cmd +} diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go index 1706f8b6a..928821586 100644 --- a/cmd/tendermint/commands/key_migrate.go +++ b/cmd/tendermint/commands/key_migrate.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/scripts/keymigrate" diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 00a1d14b2..5ecbba617 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -199,10 +199,9 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { } e := types.EventDataNewBlockHeader{ - Header: b.Header, - NumTxs: int64(len(b.Txs)), - ResultBeginBlock: *r.BeginBlock, - ResultEndBlock: *r.EndBlock, + Header: b.Header, + NumTxs: int64(len(b.Txs)), + ResultFinalizeBlock: *r.FinalizeBlock, } var batch *indexer.Batch @@ -214,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { Height: b.Height, Index: uint32(i), Tx: b.Data.Txs[i], - Result: *(r.DeliverTxs[i]), + Result: *(r.FinalizeBlock.Txs[i]), } _ = batch.Add(&tr) diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 91b1ba42a..c525d4baa 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/state/indexer" @@ -16,7 +18,6 @@ import ( "github.com/tendermint/tendermint/libs/log" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" _ "github.com/lib/pq" // for the psql sink ) @@ -110,7 +111,7 @@ func TestLoadEventSink(t *testing.T) { } func TestLoadBlockStore(t *testing.T) { - testCfg, err := config.ResetTestRoot(t.Name()) + testCfg, err := config.ResetTestRoot(t.TempDir(), t.Name()) require.NoError(t, err) testCfg.DBBackend = "goleveldb" _, _, err = loadStateAndBlockStore(testCfg) @@ -154,9 +155,9 @@ func TestReIndexEvent(t *testing.T) { dtx := abcitypes.ResponseDeliverTx{} abciResp := &prototmstate.ABCIResponses{ - DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx}, - EndBlock: &abcitypes.ResponseEndBlock{}, - BeginBlock: &abcitypes.ResponseBeginBlock{}, + FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ + Txs: []*abcitypes.ResponseDeliverTx{&dtx}, + }, } mockStateStore. 
diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index 15520c469..fb6f19e55 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -2,6 +2,7 @@ package commands import ( "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/libs/log" diff --git a/cmd/tendermint/commands/rollback_test.go b/cmd/tendermint/commands/rollback_test.go index 167fbc1f3..760dbf0ec 100644 --- a/cmd/tendermint/commands/rollback_test.go +++ b/cmd/tendermint/commands/rollback_test.go @@ -19,10 +19,12 @@ func TestRollbackIntegration(t *testing.T) { dir := t.TempDir() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := rpctest.CreateConfig(t.Name()) + cfg, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) cfg.BaseConfig.DBBackend = "goleveldb" + app, err := e2e.NewApplication(e2e.DefaultConfig(dir)) + require.NoError(t, err) t.Run("First run", func(t *testing.T) { ctx, cancel := context.WithCancel(ctx) @@ -30,27 +32,29 @@ func TestRollbackIntegration(t *testing.T) { require.NoError(t, err) node, _, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) require.NoError(t, err) + require.True(t, node.IsRunning()) time.Sleep(3 * time.Second) cancel() node.Wait() + require.False(t, node.IsRunning()) }) - t.Run("Rollback", func(t *testing.T) { + time.Sleep(time.Second) require.NoError(t, app.Rollback()) height, _, err = commands.RollbackState(cfg) - require.NoError(t, err) - + require.NoError(t, err, "%d", height) }) - t.Run("Restart", func(t *testing.T) { + require.True(t, height > 0, "%d", height) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) require.NoError(t, err2) - logger := log.NewTestingLogger(t) + logger := log.NewNopLogger() client, err := local.New(logger, node2.(local.NodeService)) require.NoError(t, err) diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index 2eade59b6..54f96f907 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -1,6 +1,7 @@ package commands import ( + "context" "fmt" "os" "path/filepath" @@ -17,6 +18,17 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" ) +// writeConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func writeConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data += fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(dir, "config.toml") + return os.WriteFile(cfile, []byte(data), 0600) +} + // clearConfig clears env vars, the given root dir, and resets viper. func clearConfig(t *testing.T, dir string) *cfg.Config { t.Helper() @@ -41,7 +53,7 @@ func testRootCmd(conf *cfg.Config) *cobra.Command { return cmd } -func testSetup(t *testing.T, conf *cfg.Config, args []string, env map[string]string) error { +func testSetup(ctx context.Context, t *testing.T, conf *cfg.Config, args []string, env map[string]string) error { t.Helper() cmd := testRootCmd(conf) @@ -49,7 +61,7 @@ func testSetup(t *testing.T, conf *cfg.Config, args []string, env map[string]str // run with the args and env args = append([]string{cmd.Use}, args...) 
- return cli.RunWithArgs(cmd, args, env) + return cli.RunWithArgs(ctx, cmd, args, env) } func TestRootHome(t *testing.T) { @@ -65,11 +77,14 @@ func TestRootHome(t *testing.T) { {nil, map[string]string{"TMHOME": newRoot}, newRoot}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i, tc := range cases { t.Run(fmt.Sprint(i), func(t *testing.T) { conf := clearConfig(t, tc.root) - err := testSetup(t, conf, tc.args, tc.env) + err := testSetup(ctx, t, conf, tc.args, tc.env) require.NoError(t, err) require.Equal(t, tc.root, conf.RootDir) @@ -99,11 +114,14 @@ func TestRootFlagsEnv(t *testing.T) { {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i, tc := range cases { t.Run(fmt.Sprint(i), func(t *testing.T) { conf := clearConfig(t, defaultDir) - err := testSetup(t, conf, tc.args, tc.env) + err := testSetup(ctx, t, conf, tc.args, tc.env) require.NoError(t, err) assert.Equal(t, tc.logLevel, conf.LogLevel) @@ -113,6 +131,9 @@ func TestRootFlagsEnv(t *testing.T) { } func TestRootConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // write non-default config nonDefaultLogLvl := "debug" cvals := map[string]string{ @@ -142,14 +163,14 @@ func TestRootConfig(t *testing.T) { // write the non-defaults to a different path // TODO: support writing sub configs so we can test that too - err = WriteConfigVals(configFilePath, cvals) + err = writeConfigVals(configFilePath, cvals) require.NoError(t, err) cmd := testRootCmd(conf) // run with the args and env tc.args = append([]string{cmd.Use}, tc.args...) - err = cli.RunWithArgs(cmd, tc.args, tc.env) + err = cli.RunWithArgs(ctx, cmd, tc.args, tc.env) require.NoError(t, err) require.Equal(t, tc.logLvl, conf.LogLevel) diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index afd3ae8f1..5f39fb21e 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -117,7 +117,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo return fmt.Errorf("failed to start node: %w", err) } - logger.Info("started node", "node", n.String()) + logger.Info("started node", "chain", conf.ChainID()) <-ctx.Done() return nil diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go index 9183a7c5e..ffc6c4d5e 100644 --- a/cmd/tendermint/commands/show_node_id.go +++ b/cmd/tendermint/commands/show_node_id.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" ) diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 6d2391dde..91ee89bea 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -44,7 +44,7 @@ func main() { commands.MakeRollbackStateCommand(conf), commands.MakeKeyMigrateCommand(conf, logger), debug.DebugCmd, - cli.NewCompletionCmd(rcmd, true), + commands.NewCompletionCmd(rcmd, true), ) // NOTE: @@ -60,7 +60,7 @@ func main() { // Create & start node rcmd.AddCommand(commands.NewRunNodeCmd(nodeFunc, conf, logger)) - if err := rcmd.ExecuteContext(ctx); err != nil { + if err := cli.RunWithTrace(ctx, rcmd); err != nil { panic(err) } } diff --git a/config/toml.go b/config/toml.go index 665300727..41d2a6614 100644 --- a/config/toml.go +++ b/config/toml.go @@ -504,13 +504,13 @@ namespace = "{{ .Instrumentation.Namespace }}" /****** these are for test settings ***********/ -func ResetTestRoot(testName 
string) (*Config, error) { - return ResetTestRootWithChainID(testName, "") +func ResetTestRoot(dir, testName string) (*Config, error) { + return ResetTestRootWithChainID(dir, testName, "") } -func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) { +func ResetTestRootWithChainID(dir, testName string, chainID string) (*Config, error) { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp(dir, fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { return nil, err } diff --git a/config/toml_test.go b/config/toml_test.go index fa7e88da0..cf27c4484 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -20,9 +20,7 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) { func TestEnsureRoot(t *testing.T) { // setup temp dir for test - tmpDir, err := os.MkdirTemp("", "config-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() // create root dir EnsureRoot(tmpDir) @@ -42,7 +40,7 @@ func TestEnsureTestRoot(t *testing.T) { testName := "ensureTestRoot" // create root dir - cfg, err := ResetTestRoot(testName) + cfg, err := ResetTestRoot(t.TempDir(), testName) require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) rootDir := cfg.RootDir diff --git a/crypto/ed25519/bench_test.go b/crypto/ed25519/bench_test.go index e57cd393f..49fcd1504 100644 --- a/crypto/ed25519/bench_test.go +++ b/crypto/ed25519/bench_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/internal/benchmarking" ) diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index f212874c7..7892cfbb1 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -9,11 +9,12 @@ import ( "math/big" secp256k1 "github.com/btcsuite/btcd/btcec" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/jsontypes" // necessary for Bitcoin address format - "golang.org/x/crypto/ripemd160" // nolint + "golang.org/x/crypto/ripemd160" //nolint:staticcheck ) //------------------------------------- @@ -178,3 +179,67 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool { func (pubKey PubKey) Type() string { return KeyType } + +// used to reject malleable signatures +// see: +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 +var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) + +// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. +// The returned signature will be of the form R || S (in lower-S form). +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey) + + sig, err := priv.Sign(crypto.Sha256(msg)) + if err != nil { + return nil, err + } + + sigBytes := serializeSig(sig) + return sigBytes, nil +} + +// VerifySignature verifies a signature of the form R || S. +// It rejects signatures which are not in lower-S form. 
+func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { + if len(sigStr) != 64 { + return false + } + + pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256()) + if err != nil { + return false + } + + // parse the signature: + signature := signatureFromBytes(sigStr) + // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't. + // see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 + if signature.S.Cmp(secp256k1halfN) > 0 { + return false + } + + return signature.Verify(crypto.Sha256(msg), pub) +} + +// Read Signature struct from R || S. Caller needs to ensure +// that len(sigStr) == 64. +func signatureFromBytes(sigStr []byte) *secp256k1.Signature { + return &secp256k1.Signature{ + R: new(big.Int).SetBytes(sigStr[:32]), + S: new(big.Int).SetBytes(sigStr[32:64]), + } +} + +// Serialize signature to R || S. +// R, S are padded to 32 bytes respectively. +func serializeSig(sig *secp256k1.Signature) []byte { + rBytes := sig.R.Bytes() + sBytes := sig.S.Bytes() + sigBytes := make([]byte, 64) + // 0 pad the byte arrays from the left if they aren't big enough. + copy(sigBytes[32-len(rBytes):32], rBytes) + copy(sigBytes[64-len(sBytes):64], sBytes) + return sigBytes +} diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go deleted file mode 100644 index 6b52dc5d2..000000000 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build !libsecp256k1 -// +build !libsecp256k1 - -package secp256k1 - -import ( - "math/big" - - secp256k1 "github.com/btcsuite/btcd/btcec" - - "github.com/tendermint/tendermint/crypto" -) - -// used to reject malleable signatures -// see: -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 -var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) - -// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. -// The returned signature will be of the form R || S (in lower-S form). -func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { - priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey) - - sig, err := priv.Sign(crypto.Sha256(msg)) - if err != nil { - return nil, err - } - - sigBytes := serializeSig(sig) - return sigBytes, nil -} - -// VerifySignature verifies a signature of the form R || S. -// It rejects signatures which are not in lower-S form. -func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { - if len(sigStr) != 64 { - return false - } - - pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256()) - if err != nil { - return false - } - - // parse the signature: - signature := signatureFromBytes(sigStr) - // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't. - // see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 - if signature.S.Cmp(secp256k1halfN) > 0 { - return false - } - - return signature.Verify(crypto.Sha256(msg), pub) -} - -// Read Signature struct from R || S. Caller needs to ensure -// that len(sigStr) == 64. -func signatureFromBytes(sigStr []byte) *secp256k1.Signature { - return &secp256k1.Signature{ - R: new(big.Int).SetBytes(sigStr[:32]), - S: new(big.Int).SetBytes(sigStr[32:64]), - } -} - -// Serialize signature to R || S. 
-// R, S are padded to 32 bytes respectively. -func serializeSig(sig *secp256k1.Signature) []byte { - rBytes := sig.R.Bytes() - sBytes := sig.S.Bytes() - sigBytes := make([]byte, 64) - // 0 pad the byte arrays from the left if they aren't big enough. - copy(sigBytes[32-len(rBytes):32], rBytes) - copy(sigBytes[64-len(sBytes):64], sBytes) - return sigBytes -} diff --git a/crypto/sr25519/bench_test.go b/crypto/sr25519/bench_test.go index 559bd0576..086a899c0 100644 --- a/crypto/sr25519/bench_test.go +++ b/crypto/sr25519/bench_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/internal/benchmarking" ) diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md index aad274e47..57a152c1f 100644 --- a/docs/architecture/adr-071-proposer-based-timestamps.md +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -61,7 +61,7 @@ The following protocols and application features require a reliable source of ti * Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. * Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification). * Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime). -* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements). +* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements) Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate. This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear). @@ -116,7 +116,7 @@ This timestamp is therefore no longer useful as part of consensus and may option type Vote struct { Type tmproto.SignedMsgType `json:"type"` Height int64 `json:"height"` - Round int32 `json:"round"` + Round int32 `json:"round"` BlockID BlockID `json:"block_id"` // zero if vote is nil. -- Timestamp time.Time `json:"timestamp"` ValidatorAddress Address `json:"validator_address"` @@ -135,7 +135,7 @@ A validator will only Prevote a proposal if the proposal timestamp is considered A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator. More specifically, a proposal timestamp is `timely` if `proposalTimestamp - PRECISION ≤ validatorLocalTime ≤ proposalTimestamp + PRECISION + MSGDELAY`. -Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration). 
+Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/spec/blob/master/proto/tendermint/types/params.proto#L11) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration). The consensus parameters will be updated to include this `Synchrony` field as follows: diff --git a/rfc/001-block-retention.md b/docs/architecture/adr-077-block-retention.md similarity index 96% rename from rfc/001-block-retention.md rename to docs/architecture/adr-077-block-retention.md index 80377145e..714b4810a 100644 --- a/rfc/001-block-retention.md +++ b/docs/architecture/adr-077-block-retention.md @@ -1,4 +1,4 @@ -# RFC 001: Configurable Block Retention +# ADR 077: Configurable Block Retention ## Changelog @@ -6,6 +6,7 @@ - 2020-03-25: Use local config for snapshot interval (@erikgrinaker) - 2020-03-31: Use ABCI commit response for block retention hint - 2020-04-02: Resolved open questions +- 2021-02-11: Migrate to tendermint repo (Originally [RFC 001](https://github.com/tendermint/spec/pull/84)) ## Author(s) @@ -73,7 +74,7 @@ The returned `retain_height` would be the lowest height that satisfies: - Local config: archive nodes may want to retain more or all blocks, e.g. via a local config option `min-retain-blocks`. There may also be a need to vary rentention for other nodes, e.g. sentry nodes which do not need historical blocks. -![Cosmos SDK block retention diagram](images/block-retention.png) +![Cosmos SDK block retention diagram](img/block-retention.png) ## Status diff --git a/rfc/002-nonzero-genesis.md b/docs/architecture/adr-078-nonzero-genesis.md similarity index 96% rename from rfc/002-nonzero-genesis.md rename to docs/architecture/adr-078-nonzero-genesis.md index 8773ae74f..8fdc1e61a 100644 --- a/rfc/002-nonzero-genesis.md +++ b/docs/architecture/adr-078-nonzero-genesis.md @@ -1,4 +1,4 @@ -# RFC 002: Non-Zero Genesis +# ADR 078: Non-Zero Genesis ## Changelog @@ -6,6 +6,7 @@ - 2020-07-28: Use weak chain linking, i.e. 
`predecessor` field (@erikgrinaker) - 2020-07-31: Drop chain linking (@erikgrinaker) - 2020-08-03: Add `State.InitialHeight` (@erikgrinaker) +- 2021-02-11: Migrate to tendermint repo (Originally [RFC 002](https://github.com/tendermint/spec/pull/119)) ## Author(s) diff --git a/rfc/003-ed25519-verification.md b/docs/architecture/adr-079-ed25519-verification.md similarity index 94% rename from rfc/003-ed25519-verification.md rename to docs/architecture/adr-079-ed25519-verification.md index 140717b0a..c20869e6c 100644 --- a/rfc/003-ed25519-verification.md +++ b/docs/architecture/adr-079-ed25519-verification.md @@ -1,8 +1,9 @@ -# RFC 003: Ed25519 Verification +# ADR 079: Ed25519 Verification ## Changelog -- August 21, 2020: initialized +- 2020-08-21: Initial RFC +- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 003](https://github.com/tendermint/spec/pull/144)) ## Author(s) diff --git a/rfc/005-reverse-sync.md b/docs/architecture/adr-080-reverse-sync.md similarity index 97% rename from rfc/005-reverse-sync.md rename to docs/architecture/adr-080-reverse-sync.md index cbecf2c98..50604ecee 100644 --- a/rfc/005-reverse-sync.md +++ b/docs/architecture/adr-080-reverse-sync.md @@ -1,7 +1,8 @@ -# RFC 005: ReverseSync - fetching historical data +# ADR 080: ReverseSync - fetching historical data ## Changelog +- 2021-02-11: Migrate to tendermint repo (Originally [RFC 005](https://github.com/tendermint/spec/pull/224)) - 2021-04-19: Use P2P to gossip necessary data for reverse sync. - 2021-03-03: Simplify proposal to the state sync case. - 2021-02-17: Add notes on asynchronicity of processes. diff --git a/rfc/images/block-retention.png b/docs/architecture/img/block-retention.png similarity index 100% rename from rfc/images/block-retention.png rename to docs/architecture/img/block-retention.png diff --git a/docs/pre.sh b/docs/pre.sh index 8ce5dac4f..76a1cff99 100755 --- a/docs/pre.sh +++ b/docs/pre.sh @@ -1,4 +1,4 @@ #!/bin/bash cp -a ../rpc/openapi/ .vuepress/public/rpc/ -git clone https://github.com/tendermint/spec.git specRepo && cp -r specRepo/spec . && rm -rf specRepo +cp -r ../spec . diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 1517db2f5..aed69a077 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -41,12 +41,15 @@ sections. 
- [RFC-001: Storage Engines](./rfc-001-storage-engine.rst) - [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md) - [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md) -- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md) +- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.rst) - [RFC-005: Event System](./rfc-005-event-system.rst) - [RFC-006: Event Subscription](./rfc-006-event-subscription.md) - [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md) - [RFC-008: Don't Panic](./rfc-008-don't-panic.md) - [RFC-009: Consensus Parameter Upgrades](./rfc-009-consensus-parameter-upgrades.md) - [RFC-010: P2P Light Client](./rfc-010-p2p-light-client.rst) +- [RFC-011: Delete Gas](./rfc-011-delete-gas.md) +- [RFC-013: ABCI++](./rfc-013-abci++.md) +- [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md) diff --git a/rfc/images/abci++.png b/docs/rfc/images/abci++.png similarity index 100% rename from rfc/images/abci++.png rename to docs/rfc/images/abci++.png diff --git a/rfc/images/abci.png b/docs/rfc/images/abci.png similarity index 100% rename from rfc/images/abci.png rename to docs/rfc/images/abci.png diff --git a/docs/rfc/rfc-011-abci++.md b/docs/rfc/rfc-011-abci++.md new file mode 100644 index 000000000..6193ae280 --- /dev/null +++ b/docs/rfc/rfc-011-abci++.md @@ -0,0 +1,257 @@ +<<<<<<< HEAD:docs/rfc/rfc-011-abci++.md +# RFC 011: ABCI++ +======= +# RFC 013: ABCI++ +>>>>>>> a895a8ea5f (Rename and renumber imported RFCs.):docs/rfc/rfc-013-abci++.md + +## Changelog + +- 2020-01-11: initialized +- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) + +## Author(s) + +- Dev (@valardragon) +- Sunny (@sunnya97) + +## Context + +ABCI is the interface between the consensus engine and the application. +It defines when the application can talk to consensus during the execution of a blockchain. +At the moment, the application can only act at one phase in consensus, immediately after a block has been finalized. + +This restriction on the application prohibits numerous features for the application, including many scalability improvements that are now better understood than when ABCI was first written. +For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to". +This includes optimizations such as tx-level signature aggregation, state transition proofs, etc. +Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot enforce validators do more than just finalize txs. +This includes features such as threshold cryptography, and guaranteed IBC connection attempts. +We propose introducing three new phases to ABCI to enable these new features, and renaming the existing methods for block execution. + +#### Prepare Proposal phase + +This phase aims to allow the block proposer to perform more computation, to reduce load on all other full nodes, and light clients in the network. +It is intended to enable features such as batch optimizations on the transaction data (e.g. signature aggregation, zk rollup style validity proofs, etc.), enabling stateless blockchains with validator provided authentication paths, etc. + +This new phase will only be executed by the block proposer. The application will take in the block header and raw transaction data output by the consensus engine's mempool. 
It will then return block data that is prepared for gossip on the network, and additional fields to include into the block header. + +#### Process Proposal Phase + +This phase aims to allow applications to determine validity of a new block proposal, and execute computation on the block data, prior to the blocks finalization. +It is intended to enable applications to reject block proposals with invalid data, and to enable alternate pipelined execution models. (Such as Ethereum-style immediate execution) + +This phase will be executed by all full nodes upon receiving a block, though on the application side it can do more work in the even that the current node is a validator. + +#### Vote Extension Phase + +This phase aims to allow applications to require their validators do more than just validate blocks. +Example usecases of this include validator determined price oracles, validator guaranteed IBC connection attempts, and validator based threshold crypto. + +This adds an app-determined data field that every validator must include with their vote, and these will thus appear in the header. + +#### Rename {BeginBlock, [DeliverTx], EndBlock} to FinalizeBlock + +The prior phases gives the application more flexibility in their execution model for a block, and they obsolete the current methods for how the consensus engine relates the block data to the state machine. Thus we refactor the existing methods to better reflect what is happening in the new ABCI model. + +This rename doesn't on its own enable anything new, but instead improves naming to clarify the expectations from the application in this new communication model. The existing ABCI methods `BeginBlock, [DeliverTx], EndBlock` are renamed to a single method called `FinalizeBlock`. + +#### Summary + +We include a more detailed list of features / scaling improvements that are blocked, and which new phases resolve them at the end of this document. + + +On the top is the existing definition of ABCI, and on the bottom is the proposed ABCI++. + +## Proposal + +Below we suggest an API to add these three new phases. +In this document, sometimes the final round of voting is referred to as precommit for clarity in how it acts in the Tendermint case. + +### Prepare Proposal + +*Note, APIs in this section will change after Vote Extensions, we list the adjusted APIs further in the proposal.* + +The Prepare Proposal phase allows the block proposer to perform application-dependent work in a block, to lower the amount of work the rest of the network must do. This enables batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. This phase introduces the following ABCI method + +```rust +fn PrepareProposal(Block) -> BlockData +``` + +where `BlockData` is a type alias for however data is internally stored within the consensus engine. In Tendermint Core today, this is `[]Tx`. + +The application may read the entire block proposal, and mutate the block data fields. Mutated transactions will still get removed from the mempool later on, as the mempool rechecks all transactions after a block is executed. + +The `PrepareProposal` API will be modified in the vote extensions section, for allowing the application to modify the header. + +### Process Proposal + +The Process Proposal phase sends the block data to the state machine, prior to running the last round of votes on the state machine. 
This enables features such as allowing validators to reject a block according to whether state machine deems it valid, and changing block execution pipeline. + +We introduce three new methods, + +```rust +fn VerifyHeader(header: Header, isValidator: bool) -> ResponseVerifyHeader {...} +fn ProcessProposal(block: Block) -> ResponseProcessProposal {...} +fn RevertProposal(height: usize, round: usize) {...} +``` + +where + +```rust +struct ResponseVerifyHeader { + accept_header: bool, + evidence: Vec +} +struct ResponseProcessProposal { + accept_block: bool, + evidence: Vec +} +``` + +Upon receiving a block header, every validator runs `VerifyHeader(header, isValidator)`. The reason for why `VerifyHeader` is split from `ProcessProposal` is due to the later sections for Preprocess Proposal and Vote Extensions, where there may be application dependent data in the header that must be verified before accepting the header. +If the returned `ResponseVerifyHeader.accept_header` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseVerifyHeader.evidence` is appended to the validators local `EvidencePool`. + +Upon receiving an entire block proposal (in the current implementation, all "block parts"), every validator runs `ProcessProposal(block)`. If the returned `ResponseProcessProposal.accept_block` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseProcessProposal.evidence` is appended to the validators local `EvidencePool`. + +Once a validator knows that consensus has failed to be achieved for a given block, it must run `RevertProposal(block.height, block.round)`, in order to signal to the application to revert any potentially mutative state changes it may have made. In Tendermint, this occurs when incrementing rounds. + +**RFC**: How do we handle the scenario where honest node A finalized on round x, and honest node B finalized on round x + 1? (e.g. when 2f precommits are publicly known, and a validator precommits themself but doesn't broadcast, but they increment rounds) Is this a real concern? The state root derived could change if everyone finalizes on round x+1, not round x, as the state machine can depend non-uniformly on timestamp. + +The application is expected to cache the block data for later execution. + +The `isValidator` flag is set according to whether the current node is a validator or a full node. This is intended to allow for beginning validator-dependent computation that will be included later in vote extensions. (An example of this is threshold decryptions of ciphertexts.) + +### DeliverTx rename to FinalizeBlock + +After implementing `ProcessProposal`, txs no longer need to be delivered during the block execution phase. Instead, they are already in the state machine. Thus `BeginBlock, DeliverTx, EndBlock` can all be replaced with a single ABCI method for `ExecuteBlock`. Internally the application may still structure its method for executing the block as `BeginBlock, DeliverTx, EndBlock`. However, it is overly restrictive to enforce that the block be executed after it is finalized. There are multiple other, very reasonable pipelined execution models one can go for. So instead we suggest calling this succession of methods `FinalizeBlock`. 
We propose the following API + +Replace the `BeginBlock, DeliverTx, EndBlock` ABCI methods with the following method + +```rust +fn FinalizeBlock() -> ResponseFinalizeBlock +``` + +where `ResponseFinalizeBlock` has the following API, in terms of what already exists + +```rust +struct ResponseFinalizeBlock { + updates: ResponseEndBlock, + tx_results: Vec +} +``` + +`ResponseEndBlock` should then be renamed to `ConsensusUpdates` and `ResponseDeliverTx` should be renamed to `ResponseTx`. + +### Vote Extensions + +The Vote Extensions phase allow applications to force their validators to do more than just validate within consensus. This is done by allowing the application to add more data to their votes, in the final round of voting. (Namely the precommit) +This additional application data will then appear in the block header. + +First we discuss the API changes to the vote struct directly + +```rust +fn ExtendVote(height: u64, round: u64) -> (UnsignedAppVoteData, SelfAuthenticatingAppData) +fn VerifyVoteExtension(signed_app_vote_data: Vec, self_authenticating_app_vote_data: Vec) -> bool +``` + +There are two types of data that the application can enforce validators to include with their vote. +There is data that the app needs the validator to sign over in their vote, and there can be self-authenticating vote data. Self-authenticating here means that the application upon seeing these bytes, knows its valid, came from the validator and is non-malleable. We give an example of each type of vote data here, to make their roles clearer. + +- Unsigned app vote data: A use case of this is if you wanted validator backed oracles, where each validator independently signs some oracle data in their vote, and the median of these values is used on chain. Thus we leverage consensus' signing process for convenience, and use that same key to sign the oracle data. +- Self-authenticating vote data: A use case of this is in threshold random beacons. Every validator produces a threshold beacon share. This threshold beacon share can be verified by any node in the network, given the share and the validators public key (which is not the same as its consensus public key). However, this decryption share will not make it into the subsequent block's header. They will be aggregated by the subsequent block proposer to get a single random beacon value that will appear in the subsequent block's header. Everyone can then verify that this aggregated value came from the requisite threshold of the validator set, without increasing the bandwidth for full nodes or light clients. To achieve this goal, the self-authenticating vote data cannot be signed over by the consensus key along with the rest of the vote, as that would require all full nodes & light clients to know this data in order to verify the vote. + +The `CanonicalVote` struct will acommodate the `UnsignedAppVoteData` field by adding another string to its encoding, after the `chain-id`. This should not interfere with existing hardware signing integrations, as it does not affect the constant offset for the `height` and `round`, and the vote size does not have an explicit upper bound. (So adding this unsigned app vote data field is equivalent from the HSM's perspective as having a superlong chain-ID) + +**RFC**: Please comment if you think it will be fine to have elongate the message the HSM signs, or if we need to explore pre-hashing the app vote data. 
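To make the preceding encoding idea concrete, here is a minimal Go sketch of sign bytes that append a single length-prefixed "unsigned app vote data" field after the chain ID, so the offsets of the height and round fields are unchanged from an HSM's perspective. The struct, field layout, and encoding below are illustrative assumptions only; they are not Tendermint's actual canonical vote encoding.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// canonicalVoteWithExtension is a hypothetical, simplified stand-in for the
// CanonicalVote discussed above, with the unsigned app vote data appended as
// one more length-prefixed field after the chain ID.
type canonicalVoteWithExtension struct {
	Height              int64
	Round               int64
	BlockID             []byte
	ChainID             string
	UnsignedAppVoteData []byte // produced by the application in ExtendVote
}

// signBytes lays the fields out in a fixed order; every variable-length field
// is length-prefixed so the message the HSM signs stays unambiguous even as
// the extension grows.
func (v canonicalVoteWithExtension) signBytes() []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, v.Height)
	binary.Write(&buf, binary.BigEndian, v.Round)
	writeLengthPrefixed(&buf, v.BlockID)
	writeLengthPrefixed(&buf, []byte(v.ChainID))
	writeLengthPrefixed(&buf, v.UnsignedAppVoteData) // the only new field
	return buf.Bytes()
}

func writeLengthPrefixed(buf *bytes.Buffer, b []byte) {
	binary.Write(buf, binary.BigEndian, uint32(len(b)))
	buf.Write(b)
}

func main() {
	v := canonicalVoteWithExtension{
		Height:              10,
		Round:               0,
		BlockID:             []byte("block-hash"),
		ChainID:             "test-chain",
		UnsignedAppVoteData: []byte("oracle-price=42"),
	}
	fmt.Printf("sign bytes: %d bytes\n", len(v.signBytes()))
}
```

From the HSM's point of view, the only difference in such a scheme is that the signed message becomes longer, which is exactly the property the question above asks about.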
+ +The flow of these methods is that when a validator has to precommit, Tendermint will first produce a precommit canonical vote without the application vote data. It will then pass it to the application, which will return unsigned application vote data, and self authenticating application vote data. It will bundle the `unsigned_application_vote_data` into the canonical vote, and pass it to the HSM to sign. Finally it will package the self-authenticating app vote data, and the `signed_vote_data` together, into one final Vote struct to be passed around the network. + +#### Changes to Prepare Proposal Phase + +There are many use cases where the additional data from vote extensions can be batch optimized. +This is mainly of interest when the votes include self-authenticating app vote data that be batched together, or the unsigned app vote data is the same across all votes. +To allow for this, we change the PrepareProposal API to the following + +```rust +fn PrepareProposal(Block, UnbatchedHeader) -> (BlockData, Header) +``` + +where `UnbatchedHeader` essentially contains a "RawCommit", the `Header` contains a batch-optimized `commit` and an additional "Application Data" field in its root. This will involve a number of changes to core data structures, which will be gone over in the ADR. +The `Unbatched` header and `rawcommit` will never be broadcasted, they will be completely internal to consensus. + +#### Inter-process communication (IPC) effects + +For brevity in exposition above, we did not discuss the trade-offs that may occur in interprocess communication delays that these changs will introduce. +These new ABCI methods add more locations where the application must communicate with the consensus engine. +In most configurations, we expect that the consensus engine and the application will be either statically or dynamically linked, so all communication is a matter of at most adjusting the memory model the data is layed out within. +This memory model conversion is typically considered negligible, as delay here is measured on the order of microseconds at most, whereas we face milisecond delays due to cryptography and network overheads. +Thus we ignore the overhead in the case of linked libraries. + +In the case where the consensus engine and the application are ran in separate processes, and thus communicate with a form of Inter-process communication (IPC), the delays can easily become on the order of miliseconds based upon the data sent. Thus its important to consider whats happening here. +We go through this phase by phase. + +##### Prepare proposal IPC overhead + +This requires a round of IPC communication, where both directions are quite large. Namely the proposer communicating an entire block to the application. +However, this can be mitigated by splitting up `PrepareProposal` into two distinct, async methods, one for the block IPC communication, and one for the Header IPC communication. + +Then for chains where the block data does not depend on the header data, the block data IPC communication can proceed in parallel to the prior block's voting phase. (As a node can know whether or not its the leader in the next round) + +Furthermore, this IPC communication is expected to be quite low relative to the amount of p2p gossip time it takes to send the block data around the network, so this is perhaps a premature concern until more sophisticated block gossip protocols are implemented. 
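
As a rough illustration of the mitigation sketched in this subsection, the following Go snippet shows one way the consensus engine could split the proposer-side work into two asynchronous IPC calls, starting the large block-data round trip early (for example while the previous block is still being voted on) and joining it when the proposal is assembled. The `proposerAppConn` interface and both method names are hypothetical; they do not exist in the ABCI surface today.

```go
package consensus

import "context"

// proposerAppConn is a hypothetical split of PrepareProposal into two async
// IPC calls, as suggested above; it is not an existing ABCI interface.
type proposerAppConn interface {
	PrepareBlockData(ctx context.Context, txs [][]byte) ([][]byte, error)
	PrepareHeader(ctx context.Context, rawHeader []byte) ([]byte, error)
}

type blockDataResult struct {
	txs [][]byte
	err error
}

// startBlockDataPrep can be called as soon as the node knows it proposes the
// next round (i.e. while the previous block is still being voted on), so the
// large block-data IPC round trip overlaps with the prior voting phase.
func startBlockDataPrep(ctx context.Context, app proposerAppConn, txs [][]byte) <-chan blockDataResult {
	ch := make(chan blockDataResult, 1)
	go func() {
		out, err := app.PrepareBlockData(ctx, txs)
		ch <- blockDataResult{txs: out, err: err}
	}()
	return ch
}

// finishProposal performs the small header round trip and then joins the
// block-data result started earlier.
func finishProposal(ctx context.Context, app proposerAppConn, rawHeader []byte, pending <-chan blockDataResult) ([][]byte, []byte, error) {
	header, err := app.PrepareHeader(ctx, rawHeader)
	if err != nil {
		return nil, nil, err
	}
	res := <-pending
	if res.err != nil {
		return nil, nil, res.err
	}
	return res.txs, header, nil
}
```

Because the result channel is buffered, the background call never blocks even if the proposal is abandoned; a real implementation would additionally rely on the context for cancellation.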
+ +##### Process Proposal IPC overhead + +This phase changes the amount of time available for the consensus engine to deliver a block's data to the state machine. +Before, the block data for block N would be delivered to the state machine upon receiving a commit for block N and then be executed. +The state machine would respond after executing the txs and before prevoting. +The time for block delivery from the consensus engine to the state machine after this change is the time of receiving block proposal N to the to time precommit on proposal N. +It is expected that this difference is unimportant in practice, as this time is in parallel to one round of p2p communication for prevoting, which is expected to be significantly less than the time for the consensus engine to deliver a block to the state machine. + +##### Vote Extension IPC overhead + +This has a small amount of data, but does incur an IPC round trip delay. This IPC round trip delay is pretty negligible as compared the variance in vote gossip time. (the IPC delay is typically on the order of 10 microseconds) + +## Status + +Proposed + +## Consequences + +### Positive + +- Enables a large number of new features for applications +- Supports both immediate and delayed execution models +- Allows application specific data from each validator +- Allows for batch optimizations across txs, and votes + +### Negative + +- This is a breaking change to all existing ABCI clients, however the application should be able to have a thin wrapper to replicate existing ABCI behavior. + - PrepareProposal - can be a no-op + - Process Proposal - has to cache the block, but can otherwise be a no-op + - Vote Extensions - can be a no-op + - Finalize Block - Can black-box call BeginBlock, DeliverTx, EndBlock given the cached block data + +- Vote Extensions adds more complexity to core Tendermint Data Structures +- Allowing alternate alternate execution models will lead to a proliferation of new ways for applications to violate expected guarantees. + +### Neutral + +- IPC overhead considerations change, but mostly for the better + +## References + +Reference for IPC delay constants: + +### Short list of blocked features / scaling improvements with required ABCI++ Phases + +| Feature | PrepareProposal | ProcessProposal | Vote Extensions | +| :--- | :---: | :---: | :---: | +| Tx based signature aggregation | X | | | +| SNARK proof of valid state transition | X | | | +| Validator provided authentication paths in stateless blockchains | X | | | +| Immediate Execution | | X | | +| Simple soft forks | | X | | +| Validator guaranteed IBC connection attempts | | | X | +| Validator based price oracles | | | X | +| Immediate Execution with increased time for block execution | X | X | X | +| Threshold Encrypted txs | X | X | X | diff --git a/docs/rfc/rfc-011-delete-gas.md b/docs/rfc/rfc-011-delete-gas.md new file mode 100644 index 000000000..a4e643ef2 --- /dev/null +++ b/docs/rfc/rfc-011-delete-gas.md @@ -0,0 +1,162 @@ +# RFC 011: Remove Gas From Tendermint + +## Changelog + +- 03-Feb-2022: Initial draft (@williambanfield). +- 10-Feb-2022: Update in response to feedback (@williambanfield). +- 11-Feb-2022: Add reflection on MaxGas during consensus (@williambanfield). + +## Abstract + +In the v0.25.0 release, Tendermint added a mechanism for tracking 'Gas' in the mempool. +At a high level, Gas allows applications to specify how much it will cost the network, +often in compute resources, to execute a given transaction. 
While such a mechanism is common +in blockchain applications, it is not generalizable enough to be a maintained as a part +of Tendermint. This RFC explores the possibility of removing the concept of Gas from +Tendermint while still allowing applications the power to control the contents of +blocks to achieve similar goals. + +## Background + +The notion of Gas was included in the original Ethereum whitepaper and exists as +an important feature of the Ethereum blockchain. + +The [whitepaper describes Gas][eth-whitepaper-messages] as an Anti-DoS mechanism. The Ethereum Virtual Machine +provides a Turing complete execution platform. Without any limitations, malicious +actors could waste computation resources by directing the EVM to perform large +or even infinite computations. Gas serves as a metering mechanism to prevent this. + +Gas appears to have been added to Tendermint multiple times, initially as part of +a now defunct `/vm` package, and in its most recent iteration [as part of v0.25.0][gas-add-pr] +as a mechanism to limit the transactions that will be included in the block by an additional +parameter. + +Gas has gained adoption within the Cosmos ecosystem [as part of the Cosmos SDK][cosmos-sdk-gas]. +The SDK provides facilities for tracking how much 'Gas' a transaction is expected to take +and a mechanism for tracking how much gas a transaction has already taken. + +Non-SDK applications also make use of the concept of Gas. Anoma appears to implement +[a gas system][anoma-gas] to meter the transactions it executes. + +While the notion of gas is present in projects that make use of Tendermint, it is +not a concern of Tendermint's. Tendermint's value and goal is producing blocks +via a distributed consensus algorithm. Tendermint relies on the application specific +code to decide how to handle the transactions Tendermint has produced (or if the +application wants to consider them at all). Gas is an application concern. + +Our implementation of Gas is not currently enforced by consensus. Our current validation check that +occurs during block propagation does not verify that the block is under the configured `MaxGas`. +Ensuring that the transactions in a proposed block do not exceed `MaxGas` would require +input from the application during propagation. The `ProcessProposal` method introduced +as part of ABCI++ would enable such input but would further entwine Tendermint and +the application. The issue of checking `MaxGas` during block propagation is important +because it demonstrates that the feature as it currently exists is not implemented +as fully as it perhaps should be. + +Our implementation of Gas is causing issues for node operators and relayers. At +the moment, transactions that overflow the configured 'MaxGas' can be silently rejected +from the mempool. Overflowing MaxGas is the _only_ way that a transaction can be considered +invalid that is not directly a result of failing the `CheckTx`. Operators, and the application, +do not know that a transaction was removed from the mempool for this reason. A stateless check +of this nature is exactly what `CheckTx` exists for and there is no reason for the mempool +to keep track of this data separately. A special [MempoolError][add-mempool-error] field +was added in v0.35 to communicate to clients that a transaction failed after `CheckTx`. 
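
To illustrate that point, here is a hedged Go sketch of an application performing the same stateless gas limit inside `CheckTx`, so an oversized transaction is rejected with an explicit code rather than silently dropped by the mempool. The `decodeGasWanted` helper, the gas limit constant, and the response codes are hypothetical application details, not Tendermint APIs; only the `ResponseCheckTx` fields used (`Code`, `Log`, `GasWanted`, `Priority`) are existing ABCI fields.

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// maxTxGas is a hypothetical application-side limit that stands in for the
// mempool's MaxGas filtering.
const maxTxGas int64 = 1_000_000

// checkTxGas sketches the stateless check discussed above: a transaction whose
// declared gas exceeds the application's limit is rejected with a non-zero
// code, so the submitter learns about it directly from CheckTx.
func checkTxGas(req abci.RequestCheckTx) abci.ResponseCheckTx {
	gasWanted, gasFee, err := decodeGasWanted(req.Tx)
	if err != nil {
		return abci.ResponseCheckTx{Code: 1, Log: "undecodable tx"}
	}
	if gasWanted > maxTxGas {
		return abci.ResponseCheckTx{Code: 2, Log: "tx exceeds application gas limit"}
	}
	return abci.ResponseCheckTx{
		Code:      abci.CodeTypeOK,
		GasWanted: gasWanted,
		// Priority lets the mempool order transactions, e.g. by fee.
		Priority: gasFee,
	}
}

// decodeGasWanted is a stub standing in for application-specific tx decoding;
// how gas and fees are declared in a transaction is entirely up to the app.
func decodeGasWanted(tx []byte) (gasWanted, gasFee int64, err error) {
	return int64(len(tx)) * 10, int64(len(tx)), nil
}
```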
+While this should alleviate the pain for operators wishing to understand whether their
+transaction was included in the mempool, it highlights that the abstraction of
+what is included in the mempool is not currently well defined.
+
+Removing Gas from Tendermint and the mempool would allow for the mempool to be a better
+abstraction: any transaction that arrived at `CheckTx` and passed the check will either be
+a candidate for a later block or be evicted, whether because its TTL is reached or to make room for
+other, higher-priority transactions. All other transactions are completely invalid and can be discarded forever.
+
+Removing gas will not be completely straightforward. It will mean ensuring that
+equivalent functionality can be implemented outside of the mempool using the mempool's API.
+
+## Discussion
+
+This section catalogs the functionality that will need to exist within the Tendermint
+mempool to allow Gas to be removed and replaced by application-side bookkeeping.
+
+### Requirement: Provide Mempool Tx Sorting Mechanism
+
+Gas produces a market for inclusion in a block. On many networks, a [gas fee][cosmos-sdk-fees] is
+included in pending transactions. This fee indicates how much a user is willing to
+pay per unit of execution, and the fees are distributed to validators.
+
+Validators wishing to extract higher gas fees are incentivized to include transactions
+with the highest listed gas fees in each block. This produces a natural ordering
+of the pending transactions. Applications wishing to implement a gas mechanism need
+to be able to order the transactions in the mempool. This can trivially be accomplished
+by sorting transactions using the `priority` field available to applications as part of
+v0.35's `ResponseCheckTx` message.
+
+### Requirement: Allow Application-Defined Block Resizing
+
+When creating a block proposal, Tendermint pulls a set of possible transactions out of
+the mempool to include in the next block. Tendermint uses MaxGas to limit the set of transactions
+it pulls out of the mempool, fetching a set of transactions whose total gas is less than MaxGas.
+
+Once gas tracking is removed from Tendermint's mempool, Tendermint will need to provide a way for
+applications to determine an acceptable set of transactions to include in the block.
+
+This is what the new ABCI++ `PrepareProposal` method is useful for. Applications
+that wish to limit the contents of a block to an application-defined limit may
+do so by removing transactions from the proposal they are passed during `PrepareProposal`.
+Applications wishing to reach parity with the current Gas implementation may do
+so by creating an application-side limit: filtering out, during `PrepareProposal`,
+transactions that cause the proposal to exceed the maximum gas. Additionally,
+applications can currently opt to have all transactions in the mempool delivered
+during `PrepareProposal` by passing `-1` for `MaxGas` and `MaxBytes` into
+[ReapMaxBytesMaxGas][reap-max-bytes-max-gas].
+
+### Requirement: Handle Transaction Metadata
+
+Moving the gas mechanism into applications adds complexity to applications. The
+application must now track how much gas it expects a transaction
+to consume. The mempool currently handles this bookkeeping responsibility and uses the estimated
+gas to determine the set of transactions to include in the block. In order to task
+the application with keeping track of this metadata, we should make it easier for the
+application to do so.
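+
+As a rough illustration, the following sketch shows one shape this application-side
+bookkeeping could take. It is not part of Tendermint or the Cosmos SDK: the `gasBook`
+type and its method names are hypothetical, and the exact ABCI++ messages that would
+feed it (`CheckTx`, `PrepareProposal`, and a possible eviction callback) are discussed
+in the sections that follow.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"sync"
+)
+
+// gasBook is a hypothetical application-side ledger of estimated gas per
+// transaction, keyed by transaction hash. It stands in for the bookkeeping
+// that the mempool currently performs using the Gas field of ResponseCheckTx.
+type gasBook struct {
+	mtx sync.Mutex
+	gas map[[sha256.Size]byte]int64
+}
+
+func newGasBook() *gasBook {
+	return &gasBook{gas: make(map[[sha256.Size]byte]int64)}
+}
+
+// record would be called while handling CheckTx: estimate the gas a
+// transaction needs and remember it for later proposal construction.
+func (b *gasBook) record(tx []byte, estimatedGas int64) {
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+	b.gas[sha256.Sum256(tx)] = estimatedGas
+}
+
+// forget would be called when a transaction leaves the mempool, for example
+// from the EvictTx callback discussed below, if such a callback were added.
+func (b *gasBook) forget(tx []byte) {
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+	delete(b.gas, sha256.Sum256(tx))
+}
+
+// filterByMaxGas would be called while handling PrepareProposal: keep
+// transactions, in the order provided, until the application-defined gas
+// limit would be exceeded. Unknown transactions count as zero gas here.
+func (b *gasBook) filterByMaxGas(txs [][]byte, maxGas int64) [][]byte {
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	var total int64
+	kept := make([][]byte, 0, len(txs))
+	for _, tx := range txs {
+		g := b.gas[sha256.Sum256(tx)]
+		if total+g > maxGas {
+			continue
+		}
+		total += g
+		kept = append(kept, tx)
+	}
+	return kept
+}
+
+func main() {
+	book := newGasBook()
+	book.record([]byte("tx-a"), 40)
+	book.record([]byte("tx-b"), 70)
+	book.record([]byte("tx-c"), 30)
+
+	// With a limit of 100 gas, tx-b is skipped; tx-a and tx-c are kept.
+	kept := book.filterByMaxGas([][]byte{[]byte("tx-a"), []byte("tx-b"), []byte("tx-c")}, 100)
+	fmt.Println(len(kept)) // 2
+}
+```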
+
+In general, we'll want to keep only one copy of this type of metadata in the program
+at a time, either in the application or in Tendermint.
+
+The following sections are possible solutions to the problem of storing transaction
+metadata without duplication.
+
+#### Metadata Handling: EvictTx Callback
+
+A possible approach to handling transaction metadata is to add a new `EvictTx`
+ABCI method. Whenever the mempool removes a transaction, either because it has
+reached its TTL or because it failed `RecheckTx`, `EvictTx` would be called with
+the transaction hash. This would indicate to the application that it could free any
+metadata it was storing about the transaction, such as the computed gas fee.
+
+Eviction callbacks are common in caching systems, so this would be well-worn territory.
+
+#### Metadata Handling: Application-Specific Metadata Field(s)
+
+An alternative approach to handling transaction metadata would be the addition of a
+new application-metadata field in the `ResponseCheckTx`. This field
+would be a protocol buffer message whose contents were entirely opaque to Tendermint.
+The application would be responsible for marshalling and unmarshalling whatever data
+it stored in this field. During `PrepareProposal`, the application would be passed
+this metadata along with the transaction, allowing the application to use it to perform
+any necessary filtering.
+
+If either of these proposed metadata handling techniques is selected, it is likely
+useful to enable applications to gossip metadata along with the transactions they
+gossip. This could easily take the form of an opaque proto message that is
+gossiped along with the transaction.
+
+## References
+
+[eth-whitepaper-messages]: https://ethereum.org/en/whitepaper/#messages-and-transactions
+[gas-add-pr]: https://github.com/tendermint/tendermint/pull/2360
+[cosmos-sdk-gas]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/gas-fees.md
+[cosmos-sdk-fees]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/tx-lifecycle.md#gas-and-fees
+[anoma-gas]: https://github.com/anoma/anoma/blob/6974fe1532a59db3574fc02e7f7e65d1216c1eb2/docs/src/specs/ledger.md#transaction-execution
+[cosmos-sdk-fee]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/types/tx/tx.pb.go#L780-L794
+[issue-7750]: https://github.com/tendermint/tendermint/issues/7750
+[reap-max-bytes-max-gas]: https://github.com/tendermint/tendermint/blob/1ac58469f32a98f1c0e2905ca1773d9eac7b7103/internal/mempool/types.go#L45
+[add-mempool-error]: https://github.com/tendermint/tendermint/blob/205bfca66f6da1b2dded381efb9ad3792f9404cf/rpc/coretypes/responses.go#L239
diff --git a/docs/rfc/rfc-012-semantic-versioning.md b/docs/rfc/rfc-012-semantic-versioning.md
new file mode 100644
index 000000000..0d1d64390
--- /dev/null
+++ b/docs/rfc/rfc-012-semantic-versioning.md
@@ -0,0 +1,98 @@
+# RFC 012: Semantic Versioning
+
+## Changelog
+
+- 2021-11-19: Initial Draft
+- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 006](https://github.com/tendermint/spec/pull/365))
+
+## Author(s)
+
+- Callum Waters @cmwaters
+
+## Context
+
+We use versioning as an instrument to hold a set of promises to users and to signal when such a set changes and how.
+In the conventional sense of a Go library, major versions signal that the public Go APIs have changed in a breaking way and thus require the users of such libraries to change their usage accordingly. Tendermint is a bit different in that there are multiple users: application developers (both in-process and out-of-process), node operators, and external clients. More importantly, both how these users interact with Tendermint and what's important to these users differ from how users interact with, and what they find important in, a more conventional library.
+
+This document attempts to encapsulate the discussions around versioning in Tendermint and draws upon them to propose a guide to how Tendermint uses versioning to make promises to its users.
+
+For a versioning policy to make sense, we must also address the intended frequency of breaking changes. The strictest guarantees in the world will not help users if we plan to break them with every release.
+
+Finally, I would like to remark that this RFC only addresses the "what", as in what the rules for versioning are. The "how" of Tendermint implementing the versioning rules we choose will be addressed in a later RFC on Soft Upgrades.
+
+## Discussion
+
+We begin with a round-up of the various users and a set of assumptions about what these users expect from Tendermint with regard to versioning:
+
+1. **Application Developers**, those that use the ABCI to build applications on top of Tendermint, are chiefly concerned with that API. Breaking changes will force developers to modify large portions of their codebase to accommodate the changes. Some ABCI changes, such as introducing priority for the mempool, don't require any effort and can be lazily adopted, whilst changes like ABCI++ may force applications to redesign their entire execution system. It's also worth considering that the APIs for Go developers differ from those for developers of other languages. The former can use the entire Tendermint library, most notably the local RPC methods, and so the team must be wary of all public Go APIs.
+2. **Node Operators**, those running node infrastructure, are predominantly concerned with downtime, complexity and frequency of upgrading, and avoiding data loss. They may also be concerned about changes that may break the scripts and tooling they use to supervise their nodes.
+3. **External Clients** are those that perform any of the following:
+    - consume the RPC endpoints of nodes like `/block`
+    - subscribe to the event stream
+    - make queries to the indexer
+
+    This set is concerned with chain upgrades, which will impact their ability to query state and block data as well as broadcast transactions. Examples include wallets and block explorers.
+
+4. **IBC module and relayers**. The developers of IBC and consumers of their software are concerned about changes that may affect a chain's ability to send arbitrary messages to another chain. Specifically, these users are affected by any breaking changes to the light client verification algorithm.
+
+Although we present them here as having different concerns, in a broader sense these user groups share a concern for the end users of applications. A crucial principle guiding this RFC is that **the ability for chains to provide continual service is more important than the actual upgrade burden put on the developers of these chains**. This means some extra burden for application developers is tolerable if it minimizes or substantially reduces downtime for the end user.
+
+### Modes of Interprocess Communication
+
+Tendermint has two primary mechanisms to communicate with other processes: RPC and P2P. The division marks the boundary between the internal and external components of the network:
+
+- The P2P layer is used in all cases where nodes (of any type) need to communicate with one another.
+- The RPC interface is for any outside process that wants to communicate with a node.
+
+The design principle here is that **communication via RPC is to a trusted source** and thus the RPC service prioritizes inspection rather than verification. The P2P interface is the primary medium for verification.
+
+As an example, an in-browser light client would verify headers (and perhaps application state) via the p2p layer, and then pass information on to the client via RPC (or potentially directly via a separate API).
+
+The main exceptions to this are the IBC module and relayers, which are external to the node but also require verifiable data. Breaking changes to the light client verification path mean that all neighbouring chains that are connected will no longer be able to verify state transitions and thus pass messages back and forth.
+
+## Proposal
+
+Tendermint version labels will follow the syntax of [Semantic Versioning 2.0.0](https://semver.org/) with a major, minor, and patch version. The version components will be interpreted according to these rules:
+
+For the entire cycle of a **major version** in Tendermint:
+
+- All blocks and state data in a blockchain can be queried. All headers can be verified even across minor version changes. Nodes can both block sync and state sync from genesis to the head of the chain.
+- Nodes in a network are able to communicate and perform BFT state machine replication so long as the agreed network version is the lowest of all nodes in a network. For example, nodes using version 1.5.x and 1.2.x can operate together so long as the network version is 1.2 or lower (but still within the 1.x range). This rule essentially captures the concept of network backwards compatibility (see the illustrative sketch at the end of this section).
+- Node RPC endpoints will remain compatible with existing external clients:
+  - New endpoints may be added, but old endpoints may not be removed.
+  - Old endpoints may be extended to add new request and response fields, but requests not using those fields must function as before the change.
+- Migrations should be automatic. Upgrading of one node can happen asynchronously with respect to other nodes (although agreement of a network-wide upgrade must still occur synchronously via consensus).
+
+For the entire cycle of a **minor version** in Tendermint:
+
+- Public Go APIs, for example in the `node` or `abci` packages, will not change in a way that requires any consumer (not just application developers) to modify their code.
+- No breaking changes to the block protocol. This means that all block-related data structures should not change in a way that breaks any of the hashes, the consensus engine, or light client verification.
+- Upgrades between minor versions may not result in any downtime (i.e., no migrations are required), nor require any changes to the config files to continue with the existing behavior. A minor version upgrade will require only stopping the existing process, swapping the binary, and starting the new process.
+
+A new **patch version** of Tendermint will only contain bug fixes and updates that impact the security and stability of Tendermint.
+
+These guarantees will come into effect at release 1.0.
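+
+To make the major-version network rule concrete, here is a small, purely illustrative
+sketch of the check it implies. The `semVer` type and `canReplicate` function are not
+part of Tendermint; they simply restate the rule that nodes sharing a major version can
+participate in consensus together as long as the agreed network version is no higher
+than the lowest minor version among them.
+
+```go
+package main
+
+import "fmt"
+
+// semVer is a minimal stand-in for a parsed Tendermint version label.
+type semVer struct{ major, minor int }
+
+// canReplicate reports whether a set of nodes can perform BFT state machine
+// replication together under the stated rule, given the agreed network
+// version (expressed here by its minor component within the shared major).
+func canReplicate(networkMinor int, nodes []semVer) bool {
+	if len(nodes) == 0 {
+		return false
+	}
+	major := nodes[0].major
+	lowest := nodes[0].minor
+	for _, v := range nodes[1:] {
+		if v.major != major {
+			return false // a major version bump marks a network-incompatible change
+		}
+		if v.minor < lowest {
+			lowest = v.minor
+		}
+	}
+	return networkMinor <= lowest
+}
+
+func main() {
+	nodes := []semVer{{1, 5}, {1, 2}}
+	fmt.Println(canReplicate(2, nodes)) // true: network version 1.2 is the lowest present
+	fmt.Println(canReplicate(5, nodes)) // false: the 1.2.x node cannot follow 1.5 network rules
+}
+```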
+ +## Status + +Proposed + +## Consequences + +### Positive + +- Clearer communication of what versioning means to us and the effect they have on our users. + +### Negative + +- Can potentially incur greater engineering effort to uphold and follow these guarantees. + +### Neutral + +## References + +- [SemVer](https://semver.org/) +- [Tendermint Tracking Issue](https://github.com/tendermint/tendermint/issues/5680) diff --git a/rfc/004-abci++.md b/docs/rfc/rfc-013-abci++.md similarity index 99% rename from rfc/004-abci++.md rename to docs/rfc/rfc-013-abci++.md index 61106ecab..0289c187e 100644 --- a/rfc/004-abci++.md +++ b/docs/rfc/rfc-013-abci++.md @@ -1,8 +1,9 @@ -# RFC 004: ABCI++ +# RFC 013: ABCI++ ## Changelog -- January 11, 2020: initialized +- 2020-01-11: initialized +- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) ## Author(s) diff --git a/rfc/006-semantic-versioning.md b/docs/rfc/rfc-014-semantic-versioning.md similarity index 98% rename from rfc/006-semantic-versioning.md rename to docs/rfc/rfc-014-semantic-versioning.md index e5de5e173..0119901b1 100644 --- a/rfc/006-semantic-versioning.md +++ b/docs/rfc/rfc-014-semantic-versioning.md @@ -1,8 +1,9 @@ -# RFC 006: Semantic Versioning +# RFC 014: Semantic Versioning ## Changelog - 2021-11-19: Initial Draft +- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 006](https://github.com/tendermint/spec/pull/365)) ## Author(s) diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md index 94acbc642..26428a754 100644 --- a/docs/roadmap/roadmap.md +++ b/docs/roadmap/roadmap.md @@ -8,7 +8,7 @@ order: 1 This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams. -Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). +Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/tendermint/issues/new/choose). Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status. @@ -43,7 +43,7 @@ Added a new `EventSink` interface to allow alternatives to Tendermint's propriet ### ABCI++ -An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, and complete delivery of blocks after agreement (to allow for concurrent execution). It enables both immediate and delayed agreement. 
[More](https://github.com/tendermint/spec/blob/master/spec/abci++/README.md) +An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, and complete delivery of blocks after agreement (to allow for concurrent execution). It enables both immediate and delayed agreement. [More](https://github.com/tendermint/tendermint/blob/master/spec/abci++/README.md) ### Proposer-Based Timestamps diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md index 4563084a6..0fa52b4d9 100644 --- a/docs/tendermint-core/block-structure.md +++ b/docs/tendermint-core/block-structure.md @@ -11,6 +11,6 @@ nodes. This blockchain is accessible via various RPC endpoints, mainly `/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what exactly is stored in these blocks? -The [specification](https://github.com/tendermint/spec/blob/8dd2ed4c6fe12459edeb9b783bdaaaeb590ec15c/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. +The [specification](https://github.com/tendermint/tendermint/tree/master/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types). diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md index bd7def551..1bf9662df 100644 --- a/docs/tendermint-core/consensus/README.md +++ b/docs/tendermint-core/consensus/README.md @@ -23,7 +23,7 @@ explained in a forthcoming document. For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the block as the block size is big, i.e., they don't embed the block inside `Proposal` and `VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in -[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section) +[Blockchain](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#blockid) section) that uniquely identifies each block. The block itself is disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a proposer first splitting a block into a number of block parts, that are then gossiped between diff --git a/docs/tendermint-core/subscription.md b/docs/tendermint-core/subscription.md index 752fb0cef..0f452c563 100644 --- a/docs/tendermint-core/subscription.md +++ b/docs/tendermint-core/subscription.md @@ -43,7 +43,7 @@ transactions](../app-dev/indexing-transactions.md) for details. When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. The list is the same Tendermint receives from ABCI application (see [EndBlock -section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in +section](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#endblock) in the ABCI spec). 
Response: diff --git a/go.mod b/go.mod index 5e7a24d94..648d47b77 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/golangci/golangci-lint v1.44.0 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 - github.com/gorilla/websocket v1.4.2 + github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/lib/pq v1.10.4 @@ -23,7 +23,6 @@ require ( github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible github.com/prometheus/client_golang v1.12.1 - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.8.2 github.com/rs/zerolog v1.26.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa diff --git a/go.sum b/go.sum index 85daf9da7..29bad59cd 100644 --- a/go.sum +++ b/go.sum @@ -477,8 +477,8 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= @@ -865,8 +865,6 @@ github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index 0718fee16..0306a31c0 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -125,7 +125,6 @@ func TestBlockPoolBasic(t *testing.T) { case err := <-errorsCh: t.Error(err) case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %v", request) if request.Height == 300 { return // Done! 
} @@ -139,21 +138,19 @@ func TestBlockPoolTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.TestingLogger() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) + pool := NewBlockPool(logger, start, requestsCh, errorsCh) err := pool.Start(ctx) if err != nil { t.Error(err) } t.Cleanup(func() { cancel(); pool.Wait() }) - for _, peer := range peers { - t.Logf("Peer %v", peer.id) - } - // Introduce each peer. go func() { for _, peer := range peers { @@ -182,7 +179,6 @@ func TestBlockPoolTimeout(t *testing.T) { for { select { case err := <-errorsCh: - t.Log(err) // consider error to be always timeout here if _, ok := timedOut[err.peerID]; !ok { counter++ @@ -191,7 +187,9 @@ func TestBlockPoolTimeout(t *testing.T) { } } case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %+v", request) + logger.Debug("received request", + "counter", counter, + "request", request) } } } diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 25814a2ea..3a6b7b2f5 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -203,7 +203,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -243,7 +243,7 @@ func TestReactor_SyncTime(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -271,7 +271,7 @@ func TestReactor_NoBlockResponse(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -323,7 +323,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) diff --git a/internal/consensus/README.md b/internal/consensus/README.md deleted file mode 100644 index 3f32d7e46..000000000 --- a/internal/consensus/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Consensus - -See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus). 
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 50b4ffc30..6ddd2ae44 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -60,7 +60,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { require.NoError(t, err) require.NoError(t, stateStore.Save(state)) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) @@ -180,6 +180,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { require.NotNil(t, lazyNodeState.privValidator) var commit *types.Commit + var votes []*types.Vote switch { case lazyNodeState.Height == lazyNodeState.state.InitialHeight: // We're creating a proposal for the first block. @@ -188,6 +189,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { case lazyNodeState.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = lazyNodeState.LastCommit.MakeCommit() + votes = lazyNodeState.LastCommit.GetVotes() default: // This shouldn't happen. lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return @@ -205,7 +207,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proposerAddr := lazyNodeState.privValidatorPubKey.Address() block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock( - ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, + ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, votes, ) require.NoError(t, err) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 2ec3dda9a..bddc2c2c3 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -50,23 +50,23 @@ type cleanupFunc func() func configSetup(t *testing.T) *config.Config { t.Helper() - cfg, err := ResetConfig("consensus_reactor_test") + cfg, err := ResetConfig(t.TempDir(), "consensus_reactor_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - consensusReplayConfig, err := ResetConfig("consensus_replay_test") + consensusReplayConfig, err := ResetConfig(t.TempDir(), "consensus_replay_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) }) - configStateTest, err := ResetConfig("consensus_state_test") + configStateTest, err := ResetConfig(t.TempDir(), "consensus_state_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) }) - configMempoolTest, err := ResetConfig("consensus_mempool_test") + configMempoolTest, err := ResetConfig(t.TempDir(), "consensus_mempool_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) }) - configByzantineTest, err := ResetConfig("consensus_byzantine_test") + configByzantineTest, err := ResetConfig(t.TempDir(), "consensus_byzantine_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) }) @@ -78,8 +78,8 @@ func ensureDir(t *testing.T, dir string, mode os.FileMode) { require.NoError(t, tmos.EnsureDir(dir, mode)) } -func ResetConfig(name string) (*config.Config, error) { - return config.ResetTestRoot(name) +func ResetConfig(dir, name string) (*config.Config, error) { + return config.ResetTestRoot(dir, name) } //------------------------------------------------------------------------------- @@ -422,7 +422,7 @@ func newState( ) *State { t.Helper() - cfg, err := 
config.ResetTestRoot("consensus_state_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_state_test") require.NoError(t, err) return newStateWithConfig(ctx, t, logger, cfg, state, pv, app) @@ -769,7 +769,7 @@ func makeConsensusState( blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) configRootDirs = append(configRootDirs, thisConfig.RootDir) @@ -827,7 +827,7 @@ func randConsensusNetWithPeers( configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { state, _ := sm.MakeGenesisState(genDoc) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) configRootDirs = append(configRootDirs, thisConfig.RootDir) @@ -839,10 +839,10 @@ func randConsensusNetWithPeers( if i < nValidators { privVal = privVals[i] } else { - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -946,8 +946,7 @@ func (*mockTicker) SetLogger(log.Logger) {} func newPersistentKVStore(t *testing.T, logger log.Logger) abci.Application { t.Helper() - dir, err := os.MkdirTemp("", "persistent-kvstore") - require.NoError(t, err) + dir := t.TempDir() return kvstore.NewPersistentKVStoreApplication(logger, dir) } diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 541b1bbc4..a3e865c68 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 0cbee4fcd..6d0d0d81b 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -35,7 +35,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { baseConfig := configSetup(t) - config, err := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) @@ -62,7 +62,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config, err := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) @@ -87,7 +87,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config, err := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test") require.NoError(t, err) 
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) @@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(0)) - resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver)) + resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) + assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver)) resCommit := app.Commit() assert.True(t, len(resCommit.Data) > 0) @@ -264,15 +264,21 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)} } -func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - txValue := txAsUint64(req.Tx) - if txValue != uint64(app.txCount) { - return abci.ResponseDeliverTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} +func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { + respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs)) + for i, tx := range req.Txs { + txValue := txAsUint64(tx) + if txValue != uint64(app.txCount) { + respTxs[i] = &abci.ResponseDeliverTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue), + } + continue + } + app.txCount++ + respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK} } - app.txCount++ - return abci.ResponseDeliverTx{Code: code.CodeTypeOK} + return abci.ResponseFinalizeBlock{Txs: respTxs} } func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index cf99fa0e4..7526518dc 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -3,6 +3,7 @@ package consensus import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" + "github.com/tendermint/tendermint/types" prometheus "github.com/go-kit/kit/metrics/prometheus" diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index 5ac592f0d..b254fc701 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" ) diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go index 9da851065..06886de27 100644 --- a/internal/consensus/mocks/fast_sync_reactor.go +++ b/internal/consensus/mocks/fast_sync_reactor.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" time "time" diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index e8c2df745..167a9c46f 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -391,7 +391,7 @@ func TestReactorWithEvidence(t *testing.T) { stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) diff 
--git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index ec16ed556..fc0ec312f 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -87,20 +87,15 @@ type mockProxyApp struct { abciResponses *tmstate.ABCIResponses } -func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - r := mock.abciResponses.DeliverTxs[mock.txCount] +func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { + r := mock.abciResponses.FinalizeBlock mock.txCount++ if r == nil { - return abci.ResponseDeliverTx{} + return abci.ResponseFinalizeBlock{} } return *r } -func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - mock.txCount = 0 - return *mock.abciResponses.EndBlock -} - func (mock *mockProxyApp) Commit() abci.ResponseCommit { return abci.ResponseCommit{Data: mock.appHash} } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 48be064e8..c39f36611 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -142,7 +142,7 @@ func TestWALCrash(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - consensusReplayConfig, err := ResetConfig(tc.name) + consensusReplayConfig, err := ResetConfig(t.TempDir(), tc.name) require.NoError(t, err) crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) @@ -665,12 +665,13 @@ func TestMockProxyApp(t *testing.T) { logger := log.TestingLogger() var validTxs, invalidTxs = 0, 0 - txIndex := 0 + txCount := 0 assert.NotPanics(t, func() { abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses) - abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0) - abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{}) + abciResWithEmptyDeliverTx.FinalizeBlock = new(abci.ResponseFinalizeBlock) + abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0) + abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{}) // called when saveABCIResponses: bytes, err := proto.Marshal(abciResWithEmptyDeliverTx) @@ -685,31 +686,33 @@ func TestMockProxyApp(t *testing.T) { require.NoError(t, err) abciRes := new(tmstate.ABCIResponses) - abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) + abciRes.FinalizeBlock = new(abci.ResponseFinalizeBlock) + abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs)) someTx := []byte("tx") - resp, err := mock.DeliverTx(ctx, abci.RequestDeliverTx{Tx: someTx}) + resp, err := mock.FinalizeBlock(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{someTx}}) + require.NoError(t, err) // TODO: make use of res.Log // TODO: make use of this info // Blocks may include invalid txs. 
- if resp.Code == abci.CodeTypeOK { - validTxs++ - } else { - invalidTxs++ + for _, tx := range resp.Txs { + if tx.Code == abci.CodeTypeOK { + validTxs++ + } else { + invalidTxs++ + } + txCount++ } - abciRes.DeliverTxs[txIndex] = resp - txIndex++ - - assert.NoError(t, err) }) - assert.True(t, validTxs == 1) - assert.True(t, invalidTxs == 0) + require.Equal(t, 1, txCount) + require.Equal(t, 1, validTxs) + require.Zero(t, invalidTxs) } func tempWALWithData(t *testing.T, data []byte) string { t.Helper() - walFile, err := os.CreateTemp("", "wal") + walFile, err := os.CreateTemp(t.TempDir(), "wal") require.NoError(t, err, "failed to create temp WAL file") _, err = walFile.Write(data) @@ -743,7 +746,7 @@ func testHandshakeReplay( logger := log.TestingLogger() if testValidatorsChange { - testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_m", t.Name(), mode)) require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() stateDB = dbm.NewMemDB() @@ -754,7 +757,7 @@ func testHandshakeReplay( commits = sim.Commits store = newMockBlockStore(t, cfg, genesisState.ConsensusParams) } else { // test single node - testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode)) require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks) @@ -1004,7 +1007,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := ResetConfig("handshake_test_") + cfg, err := ResetConfig(t.TempDir(), "handshake_test_") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) @@ -1288,7 +1291,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := abciclient.NewLocalCreator(app) - cfg, err := ResetConfig("handshake_test_") + cfg, err := ResetConfig(t.TempDir(), "handshake_test_") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index eb5fb65f1..eb0c26bed 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -1334,6 +1334,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b } var commit *types.Commit + var votes []*types.Vote switch { case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. @@ -1343,6 +1344,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() + votes = cs.LastCommit.GetVotes() default: // This shouldn't happen. cs.logger.Error("propose step; cannot propose anything without commit for the previous block") @@ -1358,7 +1360,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b proposerAddr := cs.privValidatorPubKey.Address() - return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr) + return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, votes) } // Enter: `timeoutPropose` after entering Propose. 
diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index 3ebfcf2ee..671c5f214 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -2,11 +2,10 @@ package types import ( "context" - "log" - "os" "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" @@ -16,40 +15,33 @@ import ( "github.com/tendermint/tendermint/types" ) -var cfg *config.Config // NOTE: must be reset for each _test.go file - -func TestMain(m *testing.M) { - var err error - cfg, err = config.ResetTestRoot("consensus_height_vote_set_test") +func TestPeerCatchupRounds(t *testing.T) { + cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_height_vote_set_test") if err != nil { - log.Fatal(err) + t.Fatal(err) } - code := m.Run() - os.RemoveAll(cfg.RootDir) - os.Exit(code) -} -func TestPeerCatchupRounds(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1) - hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet) + chainID := cfg.ChainID() + hvs := NewHeightVoteSet(chainID, 1, valSet) - vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals) + vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals, chainID) added, err := hvs.AddVote(vote999_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals, chainID) added, err = hvs.AddVote(vote1000_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals) + vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals, chainID) added, err = hvs.AddVote(vote1001_0, "peer1") if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -71,6 +63,7 @@ func makeVoteHR( height int64, valIndex, round int32, privVals []types.PrivValidator, + chainID string, ) *types.Vote { t.Helper() @@ -89,7 +82,6 @@ func makeVoteHR( Type: tmproto.PrecommitType, BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } - chainID := cfg.ChainID() v := vote.ToProto() err = privVal.SignVote(ctx, chainID, v) diff --git a/internal/consensus/types/peer_round_state_test.go b/internal/consensus/types/peer_round_state_test.go index 393fd2056..6d76750a7 100644 --- a/internal/consensus/types/peer_round_state_test.go +++ b/internal/consensus/types/peer_round_state_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/bits" ) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 19d447222..76294288c 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -145,7 +145,7 @@ func makeAddrs() (p2pAddr, rpcAddr string) { // getConfig returns a config for test cases func getConfig(t *testing.T) *config.Config { - c, err := config.ResetTestRoot(t.Name()) + c, err := config.ResetTestRoot(t.TempDir(), t.Name()) require.NoError(t, err) p2pAddr, rpcAddr := makeAddrs() diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go index 3d5f7d82f..5f9ddb39f 100644 --- a/internal/eventbus/event_bus.go +++ 
b/internal/eventbus/event_bus.go @@ -89,7 +89,7 @@ func (b *EventBus) Publish(ctx context.Context, eventValue string, eventData typ } func (b *EventBus) PublishEventNewBlock(ctx context.Context, data types.EventDataNewBlock) error { - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + events := data.ResultFinalizeBlock.Events // add Tendermint-reserved new block event events = append(events, types.EventNewBlock) @@ -100,7 +100,7 @@ func (b *EventBus) PublishEventNewBlock(ctx context.Context, data types.EventDat func (b *EventBus) PublishEventNewBlockHeader(ctx context.Context, data types.EventDataNewBlockHeader) error { // no explicit deadline for publishing events - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + events := data.ResultFinalizeBlock.Events // add Tendermint-reserved new block header event events = append(events, types.EventNewBlockHeader) diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index 8ecd1f08a..6e8c4e288 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -83,14 +83,12 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { bps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - resultBeginBlock := abci.ResponseBeginBlock{ + resultFinalizeBlock := abci.ResponseFinalizeBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + {Type: "testType", Attributes: []abci.EventAttribute{ + {Key: "baz", Value: "1"}, + {Key: "foz", Value: "2"}, + }}, }, } @@ -111,15 +109,13 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { edt := msg.Data().(types.EventDataNewBlock) assert.Equal(t, block, edt.Block) assert.Equal(t, blockID, edt.BlockID) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) }() err = eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, + Block: block, + BlockID: blockID, + ResultFinalizeBlock: resultFinalizeBlock, }) assert.NoError(t, err) @@ -256,14 +252,12 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { require.NoError(t, err) block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) - resultBeginBlock := abci.ResponseBeginBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ + resultFinalizeBlock := abci.ResponseFinalizeBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + {Type: "testType", Attributes: []abci.EventAttribute{ + {Key: "baz", Value: "1"}, + {Key: "foz", Value: "2"}, + }}, }, } @@ -283,14 +277,12 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { edt := msg.Data().(types.EventDataNewBlockHeader) assert.Equal(t, block.Header, edt.Header) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) }() err = 
eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ - Header: block.Header, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, + Header: block.Header, + ResultFinalizeBlock: resultFinalizeBlock, }) assert.NoError(t, err) diff --git a/internal/evidence/doc.go b/internal/evidence/doc.go index d521debd3..01d99ee36 100644 --- a/internal/evidence/doc.go +++ b/internal/evidence/doc.go @@ -1,7 +1,7 @@ /* Package evidence handles all evidence storage and gossiping from detection to block proposal. For the different types of evidence refer to the `evidence.go` file in the types package -or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md. +or https://github.com/tendermint/tendermint/blob/master/spec/consensus/light-client/accountability.md. Gossiping diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index ef3346b2a..5ea8d8344 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + types "github.com/tendermint/tendermint/types" ) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index c4ec3695e..810706607 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -14,6 +14,7 @@ import ( "github.com/fortytw2/leaktest" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" @@ -28,7 +29,7 @@ import ( ) func TestInspectConstructor(t *testing.T) { - cfg, err := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot(t.TempDir(), "test") require.NoError(t, err) testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) @@ -43,7 +44,7 @@ func TestInspectConstructor(t *testing.T) { } func TestInspectRun(t *testing.T) { - cfg, err := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot(t.TempDir(), "test") require.NoError(t, err) testLogger := log.TestingLogger() @@ -263,13 +264,13 @@ func TestBlockResults(t *testing.T) { stateStoreMock := &statemocks.Store{} // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ - DeliverTxs: []*abcitypes.ResponseDeliverTx{ - { - GasUsed: testGasUsed, + FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ + Txs: []*abcitypes.ResponseDeliverTx{ + { + GasUsed: testGasUsed, + }, }, }, - EndBlock: &abcitypes.ResponseEndBlock{}, - BeginBlock: &abcitypes.ResponseBeginBlock{}, }, nil) blockStoreMock := &statemocks.BlockStore{} blockStoreMock.On("Base").Return(int64(0)) diff --git a/internal/libs/autofile/autofile_test.go b/internal/libs/autofile/autofile_test.go index dc5ba0682..9dbba276a 100644 --- a/internal/libs/autofile/autofile_test.go +++ b/internal/libs/autofile/autofile_test.go @@ -25,11 +25,7 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := os.MkdirTemp("", "sighup_test") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(dir) - }) + dir := t.TempDir() require.NoError(t, os.Chdir(dir)) // Create an AutoFile in the temporary directory @@ -48,9 +44,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory - otherDir, err := os.MkdirTemp("", 
"sighup_test_other") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(otherDir) }) + otherDir := t.TempDir() require.NoError(t, os.Chdir(otherDir)) // Send SIGHUP to self. @@ -112,7 +106,7 @@ func TestAutoFileSize(t *testing.T) { defer cancel() // First, create an AutoFile writing to a tempfile dir - f, err := os.CreateTemp("", "sighup_test") + f, err := os.CreateTemp(t.TempDir(), "sighup_test") require.NoError(t, err) require.NoError(t, f.Close()) diff --git a/internal/libs/autofile/group_test.go b/internal/libs/autofile/group_test.go index f6b3eaab6..e20604d82 100644 --- a/internal/libs/autofile/group_test.go +++ b/internal/libs/autofile/group_test.go @@ -132,11 +132,8 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := os.MkdirTemp("", "rotate_test") - require.NoError(t, err) - defer os.RemoveAll(dir) - err = os.Chdir(dir) - require.NoError(t, err) + dir := t.TempDir() + require.NoError(t, os.Chdir(dir)) require.True(t, filepath.IsAbs(g.Head.Path)) require.True(t, filepath.IsAbs(g.Dir)) diff --git a/internal/libs/sync/closer.go b/internal/libs/sync/closer.go deleted file mode 100644 index 815ee1e80..000000000 --- a/internal/libs/sync/closer.go +++ /dev/null @@ -1,31 +0,0 @@ -package sync - -import "sync" - -// Closer implements a primitive to close a channel that signals process -// termination while allowing a caller to call Close multiple times safely. It -// should be used in cases where guarantees cannot be made about when and how -// many times closure is executed. -type Closer struct { - closeOnce sync.Once - doneCh chan struct{} -} - -// NewCloser returns a reference to a new Closer. -func NewCloser() *Closer { - return &Closer{doneCh: make(chan struct{})} -} - -// Done returns the internal done channel allowing the caller either block or wait -// for the Closer to be terminated/closed. -func (c *Closer) Done() <-chan struct{} { - return c.doneCh -} - -// Close gracefully closes the Closer. A caller should only call Close once, but -// it is safe to call it successive times. 
-func (c *Closer) Close() { - c.closeOnce.Do(func() { - close(c.doneCh) - }) -} diff --git a/internal/libs/sync/closer_test.go b/internal/libs/sync/closer_test.go deleted file mode 100644 index aea915215..000000000 --- a/internal/libs/sync/closer_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package sync_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -func TestCloser(t *testing.T) { - closer := tmsync.NewCloser() - - var timeout bool - - select { - case <-closer.Done(): - case <-time.After(time.Second): - timeout = true - } - - for i := 0; i < 10; i++ { - closer.Close() - } - - require.True(t, timeout) - <-closer.Done() -} diff --git a/internal/libs/tempfile/tempfile_test.go b/internal/libs/tempfile/tempfile_test.go index 5c38f9736..aee540c59 100644 --- a/internal/libs/tempfile/tempfile_test.go +++ b/internal/libs/tempfile/tempfile_test.go @@ -21,7 +21,7 @@ func TestWriteFileAtomic(t *testing.T) { perm os.FileMode = 0600 ) - f, err := os.CreateTemp("/tmp", "write-atomic-test-") + f, err := os.CreateTemp(t.TempDir(), "write-atomic-test-") if err != nil { t.Fatal(err) } diff --git a/internal/mempool/ids_test.go b/internal/mempool/ids_test.go index 006ad5ced..6601706bc 100644 --- a/internal/mempool/ids_test.go +++ b/internal/mempool/ids_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/mempool/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go index 82848dbfb..088af174a 100644 --- a/internal/mempool/mempool_bench_test.go +++ b/internal/mempool/mempool_bench_test.go @@ -14,8 +14,13 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + // setup the cache and the mempool number for hitting GetEvictableTxs during the + // benchmark. 5000 is the current default mempool size in the TM config. 
txmp := setup(ctx, b, 10000) + txmp.config.Size = 5000 + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + const peerID = 1 b.ResetTimer() @@ -26,9 +31,11 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) { require.NoError(b, err) priority := int64(rng.Intn(9999-1000) + 1000) - tx := []byte(fmt.Sprintf("%X=%d", prefix, priority)) + tx := []byte(fmt.Sprintf("sender-%d-%d=%X=%d", n, peerID, prefix, priority)) + txInfo := TxInfo{SenderID: uint16(peerID)} + b.StartTimer() - require.NoError(b, txmp.CheckTx(ctx, tx, nil, TxInfo{})) + require.NoError(b, txmp.CheckTx(ctx, tx, nil, txInfo)) } } diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 1aba029a4..21e3743ed 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -82,7 +82,7 @@ func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoo cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() - cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) cfg.Mempool.CacheSize = cacheSize appConnMem, err := cc(logger) diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index b64b22120..9c849cf2a 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -12,6 +12,7 @@ import ( "github.com/fortytw2/leaktest" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" @@ -41,7 +42,7 @@ type reactorTestSuite struct { func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() - cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) diff --git a/internal/mempool/tx_test.go b/internal/mempool/tx_test.go index b68246076..c6d494b04 100644 --- a/internal/mempool/tx_test.go +++ b/internal/mempool/tx_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 1faa2a6d0..d7dad4d3b 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go index 53b3c47bd..839786d91 100644 --- a/internal/p2p/metrics_test.go +++ b/internal/p2p/metrics_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/proto/tendermint/p2p" ) diff --git a/internal/p2p/p2ptest/util.go b/internal/p2p/p2ptest/util.go index 544e937bb..e0d18caae 100644 --- a/internal/p2p/p2ptest/util.go +++ b/internal/p2p/p2ptest/util.go @@ -2,6 +2,7 @@ package p2ptest import ( gogotypes "github.com/gogo/protobuf/types" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index ebfa2885b..21c950dfb 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -5,10 +5,11 @@ import ( "context" "sort" "strconv" + "sync" "time" "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + 
"github.com/tendermint/tendermint/libs/log" ) @@ -78,8 +79,10 @@ type pqScheduler struct { enqueueCh chan Envelope dequeueCh chan Envelope - closer *tmsync.Closer - done *tmsync.Closer + + closeFn func() + closeCh <-chan struct{} + done chan struct{} } func newPQScheduler( @@ -108,6 +111,9 @@ func newPQScheduler( pq := make(priorityQueue, 0) heap.Init(&pq) + closeCh := make(chan struct{}) + once := &sync.Once{} + return &pqScheduler{ logger: logger.With("router", "scheduler"), metrics: m, @@ -118,32 +124,18 @@ func newPQScheduler( sizes: sizes, enqueueCh: make(chan Envelope, enqueueBuf), dequeueCh: make(chan Envelope, dequeueBuf), - closer: tmsync.NewCloser(), - done: tmsync.NewCloser(), + closeFn: func() { once.Do(func() { close(closeCh) }) }, + closeCh: closeCh, + done: make(chan struct{}), } } -func (s *pqScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -func (s *pqScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *pqScheduler) close() { - s.closer.Close() - <-s.done.Done() -} - -func (s *pqScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - // start starts non-blocking process that starts the priority queue scheduler. -func (s *pqScheduler) start(ctx context.Context) { - go s.process(ctx) -} +func (s *pqScheduler) start(ctx context.Context) { go s.process(ctx) } +func (s *pqScheduler) enqueue() chan<- Envelope { return s.enqueueCh } +func (s *pqScheduler) dequeue() <-chan Envelope { return s.dequeueCh } +func (s *pqScheduler) close() { s.closeFn() } +func (s *pqScheduler) closed() <-chan struct{} { return s.done } // process starts a block process where we listen for Envelopes to enqueue. If // there is sufficient capacity, it will be enqueued into the priority queue, @@ -155,7 +147,7 @@ func (s *pqScheduler) start(ctx context.Context) { // After we attempt to enqueue the incoming Envelope, if the priority queue is // non-empty, we pop the top Envelope and send it on the dequeueCh. func (s *pqScheduler) process(ctx context.Context) { - defer s.done.Close() + defer close(s.done) for { select { @@ -264,13 +256,13 @@ func (s *pqScheduler) process(ctx context.Context) { "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { case s.dequeueCh <- pqEnv.envelope: - case <-s.closer.Done(): + case <-s.closeCh: return } } case <-ctx.Done(): return - case <-s.closer.Done(): + case <-s.closeCh: return } } diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 03841d000..22ecbcecb 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -6,6 +6,7 @@ import ( "time" gogotypes "github.com/gogo/protobuf/types" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/internal/p2p/queue.go b/internal/p2p/queue.go index cf36d3ca6..2ce2f23fe 100644 --- a/internal/p2p/queue.go +++ b/internal/p2p/queue.go @@ -1,7 +1,7 @@ package p2p import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) // default capacity for the size of a queue @@ -32,28 +32,22 @@ type queue interface { // in the order they were received, and blocks until message is received. 
type fifoQueue struct { queueCh chan Envelope - closer *tmsync.Closer + closeFn func() + closeCh <-chan struct{} } func newFIFOQueue(size int) queue { + closeCh := make(chan struct{}) + once := &sync.Once{} + return &fifoQueue{ queueCh: make(chan Envelope, size), - closer: tmsync.NewCloser(), + closeFn: func() { once.Do(func() { close(closeCh) }) }, + closeCh: closeCh, } } -func (q *fifoQueue) enqueue() chan<- Envelope { - return q.queueCh -} - -func (q *fifoQueue) dequeue() <-chan Envelope { - return q.queueCh -} - -func (q *fifoQueue) close() { - q.closer.Close() -} - -func (q *fifoQueue) closed() <-chan struct{} { - return q.closer.Done() -} +func (q *fifoQueue) enqueue() chan<- Envelope { return q.queueCh } +func (q *fifoQueue) dequeue() <-chan Envelope { return q.queueCh } +func (q *fifoQueue) close() { q.closeFn() } +func (q *fifoQueue) closed() <-chan struct{} { return q.closeCh } diff --git a/internal/p2p/router_filter_test.go b/internal/p2p/router_filter_test.go index 4082dc928..8915dc888 100644 --- a/internal/p2p/router_filter_test.go +++ b/internal/p2p/router_filter_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" ) @@ -29,6 +29,6 @@ func TestConnectionFiltering(t *testing.T) { }, } require.Equal(t, 0, filterByIPCount) - router.openConnection(ctx, &MemoryConnection{logger: logger, closer: sync.NewCloser()}) + router.openConnection(ctx, &MemoryConnection{logger: logger, closeFn: func() {}}) require.Equal(t, 1, filterByIPCount) } diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index b2a8fe1a0..19b4aa94c 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index e4d78529a..2b5632104 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -19,7 +19,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/mocks" "github.com/tendermint/tendermint/internal/p2p/p2ptest" @@ -385,12 +384,12 @@ func TestRouter_AcceptPeers(t *testing.T) { t.Cleanup(leaktest.Check(t)) // Set up a mock transport that handshakes. - closer := tmsync.NewCloser() + connCtx, connCancel := context.WithCancel(context.Background()) mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). 
Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() + mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) if tc.ok { mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() @@ -433,7 +432,7 @@ func TestRouter_AcceptPeers(t *testing.T) { time.Sleep(time.Millisecond) } else { select { - case <-closer.Done(): + case <-connCtx.Done(): case <-time.After(100 * time.Millisecond): require.Fail(t, "connection not closed") } @@ -620,13 +619,14 @@ func TestRouter_DialPeers(t *testing.T) { endpoint := p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} // Set up a mock transport that handshakes. - closer := tmsync.NewCloser() + connCtx, connCancel := context.WithCancel(context.Background()) + defer connCancel() mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") if tc.dialErr == nil { mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() + mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() } if tc.ok { mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() @@ -644,7 +644,7 @@ func TestRouter_DialPeers(t *testing.T) { mockTransport.On("Dial", mock.Anything, endpoint).Maybe().Return(nil, io.EOF) } else { mockTransport.On("Dial", mock.Anything, endpoint).Once(). - Run(func(_ mock.Arguments) { closer.Close() }). + Run(func(_ mock.Arguments) { connCancel() }). Return(nil, tc.dialErr) } @@ -681,7 +681,7 @@ func TestRouter_DialPeers(t *testing.T) { time.Sleep(time.Millisecond) } else { select { - case <-closer.Done(): + case <-connCtx.Done(): case <-time.After(100 * time.Millisecond): require.Fail(t, "connection not closed") } diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 46227ff8f..222dbf79c 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -138,19 +138,35 @@ func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { return nil, errors.New("transport is not listening") } - tcpConn, err := m.listener.Accept() - if err != nil { + conCh := make(chan net.Conn) + errCh := make(chan error) + go func() { + tcpConn, err := m.listener.Accept() + if err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + } select { + case conCh <- tcpConn: case <-ctx.Done(): - return nil, io.EOF - case <-m.doneCh: - return nil, io.EOF - default: - return nil, err } + }() + + select { + case <-ctx.Done(): + m.listener.Close() + return nil, io.EOF + case <-m.doneCh: + m.listener.Close() + return nil, io.EOF + case err := <-errCh: + return nil, err + case tcpConn := <-conCh: + return newMConnConnection(m.logger, tcpConn, m.mConnConfig, m.channelDescs), nil } - return newMConnConnection(m.logger, tcpConn, m.mConnConfig, m.channelDescs), nil } // Dial implements Transport. 
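// A self-contained sketch (assumed names, not taken from this diff) of the
// cancellable Accept pattern used in MConnTransport.Accept above: the blocking
// net.Listener.Accept call runs in a goroutine, and the caller selects on the
// result, the context, and a done channel, closing the listener so the goroutine
// does not leak when the caller gives up.
package main

import (
	"context"
	"io"
	"net"
)

func acceptWithContext(ctx context.Context, ln net.Listener, done <-chan struct{}) (net.Conn, error) {
	connCh := make(chan net.Conn)
	errCh := make(chan error)

	go func() {
		conn, err := ln.Accept()
		if err != nil {
			select {
			case errCh <- err:
			case <-ctx.Done():
			}
			return
		}
		select {
		case connCh <- conn:
		case <-ctx.Done():
			_ = conn.Close() // nobody is waiting for the connection anymore
		}
	}()

	select {
	case <-ctx.Done():
		ln.Close() // unblocks Accept in the goroutine
		return nil, io.EOF
	case <-done:
		ln.Close()
		return nil, io.EOF
	case err := <-errCh:
		return nil, err
	case conn := <-connCh:
		return conn, nil
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel immediately so the example returns promptly
	_, _ = acceptWithContext(ctx, ln, make(chan struct{}))
}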
diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index 0851fe0e2..0f1c2e699 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -154,9 +154,6 @@ func TestMConnTransport_Listen(t *testing.T) { t.Run(tc.endpoint.String(), func(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ctx, cancel = context.WithCancel(ctx) - defer cancel() - transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 27b9e77e1..f363c12be 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -175,10 +174,17 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti inCh := make(chan memoryMessage, t.bufferSize) outCh := make(chan memoryMessage, t.bufferSize) - closer := tmsync.NewCloser() - outConn := newMemoryConnection(t.logger, t.nodeID, peer.nodeID, inCh, outCh, closer) - inConn := newMemoryConnection(peer.logger, peer.nodeID, t.nodeID, outCh, inCh, closer) + once := &sync.Once{} + closeCh := make(chan struct{}) + closeFn := func() { once.Do(func() { close(closeCh) }) } + + outConn := newMemoryConnection(t.logger, t.nodeID, peer.nodeID, inCh, outCh) + outConn.closeCh = closeCh + outConn.closeFn = closeFn + inConn := newMemoryConnection(peer.logger, peer.nodeID, t.nodeID, outCh, inCh) + inConn.closeCh = closeCh + inConn.closeFn = closeFn select { case peer.acceptCh <- inConn: @@ -202,7 +208,9 @@ type MemoryConnection struct { receiveCh <-chan memoryMessage sendCh chan<- memoryMessage - closer *tmsync.Closer + + closeFn func() + closeCh <-chan struct{} } // memoryMessage is passed internally, containing either a message or handshake. @@ -222,7 +230,6 @@ func newMemoryConnection( remoteID types.NodeID, receiveCh <-chan memoryMessage, sendCh chan<- memoryMessage, - closer *tmsync.Closer, ) *MemoryConnection { return &MemoryConnection{ logger: logger.With("remote", remoteID), @@ -230,7 +237,6 @@ func newMemoryConnection( remoteID: remoteID, receiveCh: receiveCh, sendCh: sendCh, - closer: closer, } } @@ -264,7 +270,7 @@ func (c *MemoryConnection) Handshake( select { case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}: c.logger.Debug("sent handshake", "nodeInfo", nodeInfo) - case <-c.closer.Done(): + case <-c.closeCh: return types.NodeInfo{}, nil, io.EOF case <-ctx.Done(): return types.NodeInfo{}, nil, ctx.Err() @@ -277,7 +283,7 @@ func (c *MemoryConnection) Handshake( } c.logger.Debug("received handshake", "peerInfo", msg.nodeInfo) return *msg.nodeInfo, msg.pubKey, nil - case <-c.closer.Done(): + case <-c.closeCh: return types.NodeInfo{}, nil, io.EOF case <-ctx.Done(): return types.NodeInfo{}, nil, ctx.Err() @@ -289,7 +295,7 @@ func (c *MemoryConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byt // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. 
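// A self-contained sketch (assumed names, not taken from this diff) of the
// check-close-first idiom described in the comment above: because the data channel
// is buffered, a single select could return a pending message even after the
// connection was closed, so the close signal is drained in its own select before
// the main one.
package main

import (
	"fmt"
	"io"
)

func receive(dataCh chan string, closeCh chan struct{}) (string, error) {
	// Check close first; otherwise the select below may non-deterministically
	// return a buffered message even though the connection is already closed.
	select {
	case <-closeCh:
		return "", io.EOF
	default:
	}

	select {
	case msg := <-dataCh:
		return msg, nil
	case <-closeCh:
		return "", io.EOF
	}
}

func main() {
	dataCh := make(chan string, 1)
	closeCh := make(chan struct{})

	dataCh <- "buffered message"
	close(closeCh)

	_, err := receive(dataCh, closeCh)
	fmt.Println(err) // EOF: the close signal wins over the buffered message
}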
select { - case <-c.closer.Done(): + case <-c.closeCh: return 0, nil, io.EOF case <-ctx.Done(): return 0, nil, io.EOF @@ -300,7 +306,9 @@ func (c *MemoryConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byt case msg := <-c.receiveCh: c.logger.Debug("received message", "chID", msg.channelID, "msg", msg.message) return msg.channelID, msg.message, nil - case <-c.closer.Done(): + case <-ctx.Done(): + return 0, nil, io.EOF + case <-c.closeCh: return 0, nil, io.EOF } } @@ -310,7 +318,7 @@ func (c *MemoryConnection) SendMessage(ctx context.Context, chID ChannelID, msg // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { - case <-c.closer.Done(): + case <-c.closeCh: return io.EOF case <-ctx.Done(): return io.EOF @@ -323,19 +331,10 @@ func (c *MemoryConnection) SendMessage(ctx context.Context, chID ChannelID, msg return nil case <-ctx.Done(): return io.EOF - case <-c.closer.Done(): + case <-c.closeCh: return io.EOF } } // Close implements Connection. -func (c *MemoryConnection) Close() error { - select { - case <-c.closer.Done(): - return nil - default: - c.closer.Close() - c.logger.Info("closed connection") - } - return nil -} +func (c *MemoryConnection) Close() error { c.closeFn(); return nil } diff --git a/internal/p2p/transport_memory_test.go b/internal/p2p/transport_memory_test.go index c4eea65c3..0569faa30 100644 --- a/internal/p2p/transport_memory_test.go +++ b/internal/p2p/transport_memory_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go index 4dc86b72c..9342bd75d 100644 --- a/internal/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -5,6 +5,7 @@ import ( "time" "github.com/go-kit/kit/metrics" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" ) @@ -24,9 +25,7 @@ type AppConnConsensus interface { ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) - BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) - EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) Commit(context.Context) (*types.ResponseCommit, error) } @@ -123,28 +122,12 @@ func (app *appConnConsensus) VerifyVoteExtension( return app.appConn.VerifyVoteExtension(ctx, req) } -func (app *appConnConsensus) BeginBlock( - ctx context.Context, - req types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))() - return app.appConn.BeginBlock(ctx, req) -} - -func (app *appConnConsensus) DeliverTx( - ctx context.Context, - req types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "sync"))() - return app.appConn.DeliverTx(ctx, req) -} - -func (app 
*appConnConsensus) EndBlock( +func (app *appConnConsensus) FinalizeBlock( ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "sync"))() - return app.appConn.EndBlock(ctx, req) + req types.RequestFinalizeBlock, +) (*types.ResponseFinalizeBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() + return app.appConn.FinalizeBlock(ctx, req) } func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) { diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go index de8eac35d..22f519657 100644 --- a/internal/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" diff --git a/internal/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go index ba34020cc..38211c190 100644 --- a/internal/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -17,29 +17,6 @@ type AppConnConsensus struct { mock.Mock } -// BeginBlock provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseBeginBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // Commit provides a mock function with given fields: _a0 func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, error) { ret := _m.Called(_a0) @@ -63,44 +40,35 @@ func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, return r0, r1 } -// DeliverTx provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponseDeliverTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseDeliverTx) - } - } +// Error provides a mock function with given fields: +func (_m *AppConnConsensus) Error() error { + ret := _m.Called() - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { - r1 = rf(_a0, _a1) + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } -// EndBlock provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +// ExtendVote provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) - 
var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + var r0 *types.ResponseExtendVote + if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEndBlock) + r0 = ret.Get(0).(*types.ResponseExtendVote) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -109,35 +77,21 @@ func (_m *AppConnConsensus) EndBlock(_a0 context.Context, _a1 types.RequestEndBl return r0, r1 } -// Error provides a mock function with given fields: -func (_m *AppConnConsensus) Error() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ExtendVote provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) { +// FinalizeBlock provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseExtendVote - if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok { + var r0 *types.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseExtendVote) + r0 = ret.Get(0).(*types.ResponseFinalizeBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/internal/pubsub/pubsub.go b/internal/pubsub/pubsub.go index 5f6a1ee3b..707f9cb13 100644 --- a/internal/pubsub/pubsub.go +++ b/internal/pubsub/pubsub.go @@ -281,6 +281,9 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { s.subs.Lock() defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped + } evict := s.subs.index.findClientID(clientID) if len(evict) == 0 { return ErrSubscriptionNotFound diff --git a/internal/pubsub/pubsub_test.go b/internal/pubsub/pubsub_test.go index 9ba515d70..eee065fbf 100644 --- a/internal/pubsub/pubsub_test.go +++ b/internal/pubsub/pubsub_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/internal/pubsub/query" diff --git a/internal/pubsub/subscription.go b/internal/pubsub/subscription.go index 933e62e1c..fd1a23929 100644 --- a/internal/pubsub/subscription.go +++ b/internal/pubsub/subscription.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/google/uuid" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/queue" "github.com/tendermint/tendermint/types" diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 9f6c872ca..6258dc060 100644 --- a/internal/rpc/core/blocks.go +++ 
b/internal/rpc/core/blocks.go @@ -208,18 +208,17 @@ func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*co } var totalGasUsed int64 - for _, tx := range results.GetDeliverTxs() { + for _, tx := range results.FinalizeBlock.GetTxs() { totalGasUsed += tx.GetGasUsed() } return &coretypes.ResultBlockResults{ Height: height, - TxsResults: results.DeliverTxs, + TxsResults: results.FinalizeBlock.Txs, TotalGasUsed: totalGasUsed, - BeginBlockEvents: results.BeginBlock.Events, - EndBlockEvents: results.EndBlock.Events, - ValidatorUpdates: results.EndBlock.ValidatorUpdates, - ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates, + FinalizeBlockEvents: results.FinalizeBlock.Events, + ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, + ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates, }, nil } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index b1746acb7..4baff9d38 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -71,13 +71,13 @@ func TestBlockchainInfo(t *testing.T) { func TestBlockResults(t *testing.T) { results := &tmstate.ABCIResponses{ - DeliverTxs: []*abci.ResponseDeliverTx{ - {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, - {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, - {Code: 1, Log: "not ok", GasUsed: 0}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + Txs: []*abci.ResponseDeliverTx{ + {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, + {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, + {Code: 1, Log: "not ok", GasUsed: 0}, + }, }, - EndBlock: &abci.ResponseEndBlock{}, - BeginBlock: &abci.ResponseBeginBlock{}, } env := &Environment{} @@ -99,12 +99,11 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, &coretypes.ResultBlockResults{ Height: 100, - TxsResults: results.DeliverTxs, + TxsResults: results.FinalizeBlock.Txs, TotalGasUsed: 15, - BeginBlockEvents: results.BeginBlock.Events, - EndBlockEvents: results.EndBlock.Events, - ValidatorUpdates: results.EndBlock.ValidatorUpdates, - ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates, + FinalizeBlockEvents: results.FinalizeBlock.Events, + ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, + ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates, }}, } diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 448cd85a0..1b046038e 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -10,6 +10,7 @@ import ( "time" "github.com/rs/cors" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" diff --git a/internal/state/execution.go b/internal/state/execution.go index 6da7060a8..688a5470b 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -106,6 +106,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( height int64, state State, commit *types.Commit, proposerAddr []byte, + votes []*types.Vote, ) (*types.Block, *types.PartSet, error) { maxBytes := state.ConsensusParams.Block.MaxBytes @@ -120,7 +121,11 @@ func (blockExec *BlockExecutor) CreateProposalBlock( preparedProposal, err := blockExec.proxyApp.PrepareProposal( ctx, - abci.RequestPrepareProposal{BlockData: txs.ToSliceOfBytes(), BlockDataSize: maxDataBytes}, + abci.RequestPrepareProposal{ + BlockData: txs.ToSliceOfBytes(), + BlockDataSize: maxDataBytes, + Votes: types.VotesToProto(votes), + }, ) if err != nil { // The App MUST ensure that only 
valid (and hence 'processable') transactions @@ -223,7 +228,7 @@ func (blockExec *BlockExecutor) ApplyBlock( } // validate the validator updates and convert to tendermint types - abciValUpdates := abciResponses.EndBlock.ValidatorUpdates + abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) if err != nil { return state, fmt.Errorf("error in validator updates: %w", err) @@ -244,7 +249,7 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Lock mempool, commit app state, update mempoool. - appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.FinalizeBlock.Txs) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } @@ -287,7 +292,6 @@ func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote if err != nil { return types.VoteExtension{}, err } - return types.VoteExtensionFromProto(resp.VoteExtension), nil } @@ -372,12 +376,10 @@ func execBlockOnProxyApp( store Store, initialHeight int64, ) (*tmstate.ABCIResponses, error) { - var validTxs, invalidTxs = 0, 0 - - txIndex := 0 abciResponses := new(tmstate.ABCIResponses) + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) - abciResponses.DeliverTxs = dtxs + abciResponses.FinalizeBlock.Txs = dtxs commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) @@ -393,44 +395,22 @@ func execBlockOnProxyApp( return nil, errors.New("nil header") } - abciResponses.BeginBlock, err = proxyAppConn.BeginBlock( + abciResponses.FinalizeBlock, err = proxyAppConn.FinalizeBlock( ctx, - abci.RequestBeginBlock{ + abci.RequestFinalizeBlock{ Hash: block.Hash(), Header: *pbh, + Height: block.Height, LastCommitInfo: commitInfo, ByzantineValidators: byzVals, + Txs: block.Txs.ToSliceOfBytes(), }, ) if err != nil { - logger.Error("error in proxyAppConn.BeginBlock", "err", err) - return nil, err - } - - // run txs of block - for _, tx := range block.Txs { - resp, err := proxyAppConn.DeliverTx(ctx, abci.RequestDeliverTx{Tx: tx}) - if err != nil { - return nil, err - } - if resp.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("invalid tx", "code", resp.Code, "log", resp.Log) - invalidTxs++ - } - - abciResponses.DeliverTxs[txIndex] = resp - txIndex++ - } - - abciResponses.EndBlock, err = proxyAppConn.EndBlock(ctx, abci.RequestEndBlock{Height: block.Height}) - if err != nil { - logger.Error("error in proxyAppConn.EndBlock", "err", err) + logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) return nil, err } - - logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs) + logger.Info("executed block", "height", block.Height) return abciResponses, nil } @@ -529,9 +509,9 @@ func updateState( // Update the params with the latest abciResponses. 
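// A sketch of the caller-side shape after the execBlockOnProxyApp change above
// (hypothetical helper and interface names; only the abci field names come from
// this diff): the consensus connection now makes a single FinalizeBlock call that
// carries all block txs and returns the per-tx results, validator updates, and
// consensus-param updates that BeginBlock / DeliverTx / EndBlock used to return
// separately.
package example

import (
	"context"
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// finalizer is a minimal stand-in for the consensus app connection.
type finalizer interface {
	FinalizeBlock(context.Context, abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error)
}

func execBlock(ctx context.Context, app finalizer, height int64, txs [][]byte) error {
	resp, err := app.FinalizeBlock(ctx, abci.RequestFinalizeBlock{
		Height: height,
		Txs:    txs,
	})
	if err != nil {
		return err
	}
	// One response carries the per-tx results that DeliverTx used to return one by one.
	for i, tx := range resp.Txs {
		fmt.Printf("tx %d -> code %d\n", i, tx.Code)
	}
	_ = resp.ValidatorUpdates      // formerly in ResponseEndBlock
	_ = resp.ConsensusParamUpdates // formerly in ResponseEndBlock
	return nil
}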
nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged - if abciResponses.EndBlock.ConsensusParamUpdates != nil { + if abciResponses.FinalizeBlock.ConsensusParamUpdates != nil { // NOTE: must not mutate s.ConsensusParams - nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates) + nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.FinalizeBlock.ConsensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { return state, fmt.Errorf("error updating consensus params: %w", err) @@ -578,19 +558,17 @@ func fireEvents( validatorUpdates []*types.Validator, ) { if err := eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, + Block: block, + BlockID: blockID, + ResultFinalizeBlock: *abciResponses.FinalizeBlock, }); err != nil { logger.Error("failed publishing new block", "err", err) } if err := eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ - Header: block.Header, - NumTxs: int64(len(block.Txs)), - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, + Header: block.Header, + NumTxs: int64(len(block.Txs)), + ResultFinalizeBlock: *abciResponses.FinalizeBlock, }); err != nil { logger.Error("failed publishing new block header", "err", err) } @@ -606,13 +584,21 @@ func fireEvents( } } + // sanity check + if len(abciResponses.FinalizeBlock.Txs) != len(block.Data.Txs) { + panic(fmt.Sprintf("number of TXs (%d) and ABCI TX responses (%d) do not match", + len(block.Data.Txs), len(abciResponses.FinalizeBlock.Txs))) + } + for i, tx := range block.Data.Txs { - if err := eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: abci.TxResult{ - Height: block.Height, - Index: uint32(i), - Tx: tx, - Result: *(abciResponses.DeliverTxs[i]), - }}); err != nil { + if err := eventBus.PublishEventTx(ctx, types.EventDataTx{ + TxResult: abci.TxResult{ + Height: block.Height, + Index: uint32(i), + Tx: tx, + Result: *(abciResponses.FinalizeBlock.Txs[i]), + }, + }); err != nil { logger.Error("failed publishing event TX", "err", err) } } @@ -648,7 +634,7 @@ func ExecCommitBlock( // the BlockExecutor condition is using for the final block replay process. if be != nil { - abciValUpdates := abciResponses.EndBlock.ValidatorUpdates + abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, s.ConsensusParams.Validator) if err != nil { logger.Error("err", err) diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 4bebe1a94..ed69b275b 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -156,8 +156,7 @@ func makeHeaderPartsResponsesValPubKeyChange( block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) require.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil}, } // If the pubkey is new, remove the old and add the new. 
_, val := state.NextValidators.GetByIndex(0) @@ -167,7 +166,7 @@ func makeHeaderPartsResponsesValPubKeyChange( pbPk, err := encoding.PubKeyToProto(pubkey) require.NoError(t, err) - abciResponses.EndBlock = &abci.ResponseEndBlock{ + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ ValidatorUpdates: []abci.ValidatorUpdate{ {PubKey: vPbPk, Power: 0}, {PubKey: pbPk, Power: 10}, @@ -189,8 +188,7 @@ func makeHeaderPartsResponsesValPowerChange( require.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil}, } // If the pubkey is new, remove the old and add the new. @@ -199,7 +197,7 @@ func makeHeaderPartsResponsesValPowerChange( vPbPk, err := encoding.PubKeyToProto(val.PubKey) require.NoError(t, err) - abciResponses.EndBlock = &abci.ResponseEndBlock{ + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ ValidatorUpdates: []abci.ValidatorUpdate{ {PubKey: vPbPk, Power: power}, }, @@ -220,8 +218,7 @@ func makeHeaderPartsResponsesParams( require.NoError(t, err) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: &pbParams}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams}, } return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } @@ -298,22 +295,29 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { return abci.ResponseInfo{} } -func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { +func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock { app.CommitVotes = req.LastCommitInfo.Votes app.ByzantineValidators = req.ByzantineValidators - return abci.ResponseBeginBlock{} -} -func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ + resTxs := make([]*abci.ResponseDeliverTx, len(req.Txs)) + for i, tx := range req.Txs { + if len(tx) > 0 { + resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + } else { + resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK + 10} // error + } + } + + return abci.ResponseFinalizeBlock{ ValidatorUpdates: app.ValidatorUpdates, ConsensusParamUpdates: &tmproto.ConsensusParams{ Version: &tmproto.VersionParams{ - AppVersion: 1}}} -} - -func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Events: []abci.Event{}} + AppVersion: 1, + }, + }, + Events: []abci.Event{}, + Txs: resTxs, + } } func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index 2ac133bf1..f26eb30bb 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -66,13 +66,8 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { } // 2. index BeginBlock events - if err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height); err != nil { - return fmt.Errorf("failed to index BeginBlock events: %w", err) - } - - // 3. 
index EndBlock events - if err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height); err != nil { - return fmt.Errorf("failed to index EndBlock events: %w", err) + if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil { + return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } return batch.WriteSync() diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go index 8878e0f08..eabe981a3 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -20,10 +20,10 @@ func TestBlockIndexer(t *testing.T) { require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_event1", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -32,12 +32,8 @@ func TestBlockIndexer(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_event2", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -55,13 +51,12 @@ func TestBlockIndexer(t *testing.T) { if i%2 == 0 { index = true } - require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_event1", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -70,12 +65,8 @@ func TestBlockIndexer(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_event2", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -102,31 +93,31 @@ func TestBlockIndexer(t *testing.T) { results: []int64{5}, }, "begin_event.key1 = 'value1'": { - q: query.MustCompile(`begin_event.key1 = 'value1'`), + q: query.MustCompile(`finalize_event1.key1 = 'value1'`), results: []int64{}, }, "begin_event.proposer = 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer = 'FCAA001'`), + q: query.MustCompile(`finalize_event1.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, "end_event.foo <= 5": { - q: query.MustCompile(`end_event.foo <= 5`), + q: query.MustCompile(`finalize_event2.foo <= 5`), results: []int64{2, 4}, }, "end_event.foo >= 100": { - q: query.MustCompile(`end_event.foo >= 100`), + q: query.MustCompile(`finalize_event2.foo >= 100`), results: []int64{1}, }, - "block.height > 2 AND end_event.foo <= 8": { - q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), + "block.height > 2 AND finalize_event2.foo <= 8": { + q: query.MustCompile(`block.height > 2 AND finalize_event2.foo <= 8`), results: []int64{4, 6, 8}, }, "begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), + q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, "begin_event.proposer CONTAINS 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), + q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } diff --git a/internal/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go index 28e22718c..fd6846273 100644 --- 
a/internal/state/indexer/block/kv/util.go +++ b/internal/state/indexer/block/kv/util.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/google/orderedcode" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index 47be1e28e..d640d4b23 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -137,6 +137,9 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { t.Helper() pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) assert.NoError(t, err) + if _, err := pool.Client.Info(); err != nil { + t.Skipf("WARNING: Docker is not available: %v [skipping this test]", err) + } resource, err = pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index d5555a417..6173480dd 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/internal/pubsub/query" diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index b6436dafd..b59d55856 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -10,6 +10,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" @@ -33,10 +34,10 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_eventA", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -45,12 +46,8 @@ func TestBlockFuncs(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_eventB", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -75,10 +72,10 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_eventA", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -87,12 +84,8 @@ func TestBlockFuncs(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_eventB", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -118,32 +111,32 @@ func TestBlockFuncs(t *testing.T) { q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, - "begin_event.key1 = 'value1'": { - q: query.MustCompile(`begin_event.key1 = 'value1'`), + "finalize_eventA.key1 = 'value1'": { + q: query.MustCompile(`finalize_eventA.key1 = 'value1'`), results: []int64{}, }, - "begin_event.proposer = 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer = 
'FCAA001'`), + "finalize_eventA.proposer = 'FCAA001'": { + q: query.MustCompile(`finalize_eventA.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, - "end_event.foo <= 5": { - q: query.MustCompile(`end_event.foo <= 5`), + "finalize_eventB.foo <= 5": { + q: query.MustCompile(`finalize_eventB.foo <= 5`), results: []int64{2, 4}, }, - "end_event.foo >= 100": { - q: query.MustCompile(`end_event.foo >= 100`), + "finalize_eventB.foo >= 100": { + q: query.MustCompile(`finalize_eventB.foo >= 100`), results: []int64{1}, }, - "block.height > 2 AND end_event.foo <= 8": { - q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), + "block.height > 2 AND finalize_eventB.foo <= 8": { + q: query.MustCompile(`block.height > 2 AND finalize_eventB.foo <= 8`), results: []int64{4, 6, 8}, }, - "begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), + "finalize_eventA.proposer CONTAINS 'FFFFFFF'": { + q: query.MustCompile(`finalize_eventA.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, - "begin_event.proposer CONTAINS 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), + "finalize_eventA.proposer CONTAINS 'FCAA001'": { + q: query.MustCompile(`finalize_eventA.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } diff --git a/internal/state/indexer/sink/null/null_test.go b/internal/state/indexer/sink/null/null_test.go index 6de7669ce..9af66027f 100644 --- a/internal/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index 1208bca19..c06383264 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" @@ -169,11 +170,8 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at) return fmt.Errorf("block meta-events: %w", err) } // Insert all the block events. Order is important here, - if err := insertEvents(dbtx, blockID, 0, h.ResultBeginBlock.Events); err != nil { - return fmt.Errorf("begin-block events: %w", err) - } - if err := insertEvents(dbtx, blockID, 0, h.ResultEndBlock.Events); err != nil { - return fmt.Errorf("end-block events: %w", err) + if err := insertEvents(dbtx, blockID, 0, h.ResultFinalizeBlock.Events); err != nil { + return fmt.Errorf("finalize-block events: %w", err) } return nil }) diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 2168eb556..9ac541c72 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -17,6 +17,7 @@ import ( "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" @@ -52,12 +53,19 @@ const ( func TestMain(m *testing.M) { flag.Parse() - // Set up docker and start a container running PostgreSQL. + // Set up docker. 
pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) if err != nil { log.Fatalf("Creating docker pool: %v", err) } + // If docker is unavailable, log and exit without reporting failure. + if _, err := pool.Client.Info(); err != nil { + log.Printf("WARNING: Docker is not available: %v [skipping this test]", err) + return + } + + // Start a container running PostgreSQL. resource, err := pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", Tag: "13", @@ -213,15 +221,11 @@ func TestStop(t *testing.T) { func newTestBlockHeader() types.EventDataNewBlockHeader { return types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ - makeIndexedEvent("begin_event.proposer", "FCAA001"), + makeIndexedEvent("finalize_event.proposer", "FCAA001"), makeIndexedEvent("thingy.whatzit", "O.O"), - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ - makeIndexedEvent("end_event.foo", "100"), + makeIndexedEvent("my_event.foo", "100"), makeIndexedEvent("thingy.whatzit", "-.O"), }, }, diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index 7744c3183..e36aed185 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -4,7 +4,6 @@ import ( "context" "crypto/rand" "fmt" - "os" "testing" dbm "github.com/tendermint/tm-db" @@ -15,10 +14,7 @@ import ( ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") - if err != nil { - b.Errorf("failed to create temporary directory: %s", err) - } + dbDir := b.TempDir() db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir) if err != nil { diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index 018fe51b4..2caf9efc1 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -3,7 +3,6 @@ package kv import ( "context" "fmt" - "os" "testing" "github.com/gogo/protobuf/proto" @@ -333,9 +332,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := os.MkdirTemp("", "tx_index_db") - require.NoError(b, err) - defer os.RemoveAll(dir) + dir := b.TempDir() store, err := dbm.NewDB("tx_index", "goleveldb", dir) require.NoError(b, err) diff --git a/internal/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go index 97e3aff76..9f2d2daf3 100644 --- a/internal/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/internal/pubsub/query" diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 8bf4a9b64..67127a5fd 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 02c69d3e0..b7a58e415 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + state 
"github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git a/internal/state/state.go b/internal/state/state.go index 20a36a1c4..43cd78fb0 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -8,6 +8,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git a/internal/state/state_test.go b/internal/state/state_test.go index d5c6a649d..e66cde77a 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -26,7 +26,7 @@ import ( // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - cfg, err := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot(t.TempDir(), "state_") require.NoError(t, err) dbType := dbm.BackendType(cfg.DBBackend) @@ -108,13 +108,14 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) - abciResponses.DeliverTxs = dtxs + abciResponses.FinalizeBlock = new(abci.ResponseFinalizeBlock) + abciResponses.FinalizeBlock.Txs = dtxs - abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} - abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} + abciResponses.FinalizeBlock.Txs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} + abciResponses.FinalizeBlock.Txs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) - abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}} + abciResponses.FinalizeBlock.ValidatorUpdates = []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}} err = stateStore.SaveABCIResponses(block.Height, abciResponses) require.NoError(t, err) @@ -148,7 +149,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, []*abci.ResponseDeliverTx{ {Code: 32, Data: []byte("Hello")}, - }}, + }, + }, 2: { []*abci.ResponseDeliverTx{ {Code: 383}, @@ -166,7 +168,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { {Type: "type1", Attributes: []abci.EventAttribute{{Key: "a", Value: "1"}}}, {Type: "type2", Attributes: []abci.EventAttribute{{Key: "build", Value: "stuff"}}}, }}, - }}, + }, + }, 3: { nil, nil, @@ -188,9 +191,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save responses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: tc.added, - EndBlock: &abci.ResponseEndBlock{}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + Txs: tc.added, + }, } err := stateStore.SaveABCIResponses(h, responses) require.NoError(t, err) @@ -203,10 +206,12 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { if assert.NoError(t, err, "%d", i) { t.Log(res) responses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: tc.expected, - EndBlock: &abci.ResponseEndBlock{}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + Txs: tc.expected, + }, } + sm.ABCIResponsesResultsHash(res) + sm.ABCIResponsesResultsHash(responses) assert.Equal(t, sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) } } @@ -271,7 +276,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { power++ } 
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) @@ -452,10 +457,11 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) @@ -570,10 +576,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) @@ -633,7 +640,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { updatedVal2, ) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) @@ -673,10 +680,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // -> proposers should alternate: oldState := updatedState3 abciResponses = &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -689,10 +697,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) 
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -747,10 +756,11 @@ func TestLargeGenesisValidator(t *testing.T) { for i := 0; i < 10; i++ { // no updates: abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) @@ -782,8 +792,9 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}, + }, } block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) require.NoError(t, err) @@ -799,10 +810,11 @@ func TestLargeGenesisValidator(t *testing.T) { for i := 0; i < 200; i++ { // no updates: abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) block, err := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) @@ -840,8 +852,9 @@ func TestLargeGenesisValidator(t *testing.T) { assert.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{addedVal}, + }, } block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) require.NoError(t, err) @@ -859,8 +872,9 @@ func TestLargeGenesisValidator(t *testing.T) { require.NoError(t, err) removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} abciResponses = &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}, + }, } block, err = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) @@ -870,7 +884,7 @@ func TestLargeGenesisValidator(t *testing.T) { require.NoError(t, err) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -884,10 +898,11 @@ func 
TestLargeGenesisValidator(t *testing.T) { isProposerUnchanged := true for isProposerUnchanged { abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) block, err = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) require.NoError(t, err) @@ -913,10 +928,11 @@ func TestLargeGenesisValidator(t *testing.T) { for i := 0; i < 100; i++ { // no updates: abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + ValidatorUpdates: nil, + }, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) block, err := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) @@ -984,7 +1000,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Save state etc. var validatorUpdates []*types.Validator - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) @@ -1062,7 +1078,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { cp = params[changeIndex] } header, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) diff --git a/internal/state/store.go b/internal/state/store.go index de17be0d7..c8b99b36d 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -401,7 +401,7 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b // // See merkle.SimpleHashFromByteSlices func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { - return types.NewResults(ar.DeliverTxs).Hash() + return types.NewResults(ar.FinalizeBlock.Txs).Hash() } // LoadABCIResponses loads the ABCIResponses for the given height from the @@ -444,13 +444,13 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { var dtxs []*abci.ResponseDeliverTx // strip nil values, - for _, tx := range abciResponses.DeliverTxs { + for _, tx := range abciResponses.FinalizeBlock.Txs { if tx != nil { dtxs = append(dtxs, tx) } } - abciResponses.DeliverTxs = dtxs + abciResponses.FinalizeBlock.Txs = dtxs bz, err := abciResponses.Marshal() if err != nil { diff --git a/internal/state/store_test.go b/internal/state/store_test.go index d7e599610..fd9c4bf5a 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -110,7 +110,7 @@ func TestStoreLoadValidators(t *testing.T) 
{ func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - cfg, err := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot(b.TempDir(), "state_") require.NoError(b, err) defer os.RemoveAll(cfg.RootDir) @@ -238,10 +238,12 @@ func TestPruneStates(t *testing.T) { require.NoError(t, err) err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ - DeliverTxs: []*abci.ResponseDeliverTx{ - {Data: []byte{1}}, - {Data: []byte{2}}, - {Data: []byte{3}}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + Txs: []*abci.ResponseDeliverTx{ + {Data: []byte{1}}, + {Data: []byte{2}}, + {Data: []byte{3}}, + }, }, }) require.NoError(t, err) @@ -300,20 +302,20 @@ func TestPruneStates(t *testing.T) { func TestABCIResponsesResultsHash(t *testing.T) { responses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: []*abci.ResponseDeliverTx{ - {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + Txs: []*abci.ResponseDeliverTx{ + {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, + }, }, - EndBlock: &abci.ResponseEndBlock{}, } root := sm.ABCIResponsesResultsHash(responses) - // root should be Merkle tree root of DeliverTxs responses - results := types.NewResults(responses.DeliverTxs) + // root should be Merkle tree root of FinalizeBlock tx responses + results := types.NewResults(responses.FinalizeBlock.Txs) assert.Equal(t, root, results.Hash()) - // test we can prove first DeliverTx + // test we can prove first tx in FinalizeBlock proof := results.ProveResult(0) bz, err := results[0].Marshal() require.NoError(t, err) diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index 96dba1ec3..5154d170a 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/require" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/types" diff --git a/internal/state/time_test.go b/internal/state/time_test.go index 893ade7ea..5da97e819 100644 --- a/internal/state/time_test.go +++ b/internal/state/time_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/stretchr/testify/assert" + tmtime "github.com/tendermint/tendermint/libs/time" ) diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index c3604df9d..8480b4dd8 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -18,7 +18,7 @@ func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { Hash: []byte{7}, Metadata: nil, } - queue, err := newChunkQueue(snapshot, "") + queue, err := newChunkQueue(snapshot, t.TempDir()) require.NoError(t, err) teardown := func() { err := queue.Close() @@ -35,9 +35,7 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := os.MkdirTemp("", "newchunkqueue") - require.NoError(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index b8d681631..b19a6787f 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" diff --git 
a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 46287ada1..be0c5b63d 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -503,7 +503,7 @@ func TestSyncer_applyChunks_Results(t *testing.T) { rts := setup(ctx, t, nil, nil, stateProvider, 2) body := []byte{1, 2, 3} - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() @@ -562,7 +562,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { rts := setup(ctx, t, nil, nil, stateProvider, 2) - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() @@ -660,7 +660,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, s2) require.NoError(t, err) - chunks, err := newChunkQueue(s1, "") + chunks, err := newChunkQueue(s1, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 52bf518d4..8e1ee2db3 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -46,8 +46,8 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { commitSigs) } -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc, error) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") +func makeStateAndBlockStore(dir string, logger log.Logger) (sm.State, *BlockStore, cleanupFunc, error) { + cfg, err := config.ResetTestRoot(dir, "blockchain_reactor_test") if err != nil { return sm.State{}, nil, nil, err } @@ -75,10 +75,13 @@ var ( ) func TestMain(m *testing.M) { + dir, err := os.MkdirTemp("", "store_test") + if err != nil { + stdlog.Fatal(err) + } var cleanup cleanupFunc - var err error - state, _, cleanup, err = makeStateAndBlockStore(log.NewNopLogger()) + state, _, cleanup, err = makeStateAndBlockStore(dir, log.NewNopLogger()) if err != nil { stdlog.Fatal(err) } @@ -97,12 +100,13 @@ func TestMain(m *testing.M) { seenCommit1 = makeTestCommit(10, tmtime.Now()) code := m.Run() cleanup() + os.RemoveAll(dir) // best-effort os.Exit(code) } // TODO: This test should be simplified ... 
func TestBlockStoreSaveLoadBlock(t *testing.T) { - state, bs, cleanup, err := makeStateAndBlockStore(log.NewNopLogger()) + state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir(), log.NewNopLogger()) defer cleanup() require.NoError(t, err) require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") @@ -313,7 +317,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -373,7 +377,7 @@ func TestLoadBlockPart(t *testing.T) { } func TestPruneBlocks(t *testing.T) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -494,7 +498,7 @@ func TestLoadBlockMeta(t *testing.T) { } func TestBlockFetchAtHeight(t *testing.T) { - state, bs, cleanup, err := makeStateAndBlockStore(log.NewNopLogger()) + state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir(), log.NewNopLogger()) defer cleanup() require.NoError(t, err) require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") diff --git a/internal/test/factory/block.go b/internal/test/factory/block.go index 3fd34cdc5..1d5709dcc 100644 --- a/internal/test/factory/block.go +++ b/internal/test/factory/block.go @@ -5,6 +5,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/types" diff --git a/internal/test/factory/p2p.go b/internal/test/factory/p2p.go index 40d8eda9d..e2edcba6a 100644 --- a/internal/test/factory/p2p.go +++ b/internal/test/factory/p2p.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) diff --git a/internal/test/factory/validator.go b/internal/test/factory/validator.go index 383ba7536..6d8f4f716 100644 --- a/internal/test/factory/validator.go +++ b/internal/test/factory/validator.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" ) diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 76f3c9043..6f723ac02 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -1,29 +1,20 @@ package cli import ( - "bytes" + "context" "fmt" - "io" "os" - "path/filepath" + "runtime" "github.com/spf13/cobra" + "github.com/spf13/viper" ) -// WriteConfigVals writes a toml file with the given values. -// It returns an error if writing was impossible. -func WriteConfigVals(dir string, vals map[string]string) error { - data := "" - for k, v := range vals { - data += fmt.Sprintf("%s = \"%s\"\n", k, v) - } - cfile := filepath.Join(dir, "config.toml") - return os.WriteFile(cfile, []byte(data), 0600) -} - // RunWithArgs executes the given command with the specified command line args // and environmental variables set. It returns any error returned from cmd.Execute() -func RunWithArgs(cmd Executable, args []string, env map[string]string) error { +// +// This is only used in testing. 
+func RunWithArgs(ctx context.Context, cmd *cobra.Command, args []string, env map[string]string) error { oargs := os.Args oenv := map[string]string{} // defer returns the environment back to normal @@ -46,85 +37,24 @@ func RunWithArgs(cmd Executable, args []string, env map[string]string) error { } // and finally run the command - return cmd.Execute() + return RunWithTrace(ctx, cmd) } -// RunCaptureWithArgs executes the given command with the specified command -// line args and environmental variables set. It returns string fields -// representing output written to stdout and stderr, additionally any error -// from cmd.Execute() is also returned -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { - oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout - rOut, wOut, _ := os.Pipe() - rErr, wErr, _ := os.Pipe() - os.Stdout, os.Stderr = wOut, wErr - defer func() { - os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout - }() - - // copy the output in a separate goroutine so printing can't block indefinitely - copyStd := func(reader *os.File) *(chan string) { - stdC := make(chan string) - go func() { - var buf bytes.Buffer - // io.Copy will end when we call reader.Close() below - io.Copy(&buf, reader) //nolint:errcheck //ignore error - select { - case <-cmd.Context().Done(): - case stdC <- buf.String(): - } - }() - return &stdC - } - outC := copyStd(rOut) - errC := copyStd(rErr) - - // now run the command - err = RunWithArgs(cmd, args, env) - - // and grab the stdout to return - wOut.Close() - wErr.Close() - stdout = <-*outC - stderr = <-*errC - return stdout, stderr, err -} - -// NewCompletionCmd returns a cobra.Command that generates bash and zsh -// completion scripts for the given root command. If hidden is true, the -// command will not show up in the root command's list of available commands. -func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command { - flagZsh := "zsh" - cmd := &cobra.Command{ - Use: "completion", - Short: "Generate shell completion scripts", - Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT. - -Once saved to file, a completion script can be loaded in the shell's -current session as shown: - - $ . <(%s completion) - -To configure your bash shell to load completions for each session add to -your $HOME/.bashrc or $HOME/.profile the following instruction: +func RunWithTrace(ctx context.Context, cmd *cobra.Command) error { + cmd.SilenceUsage = true + cmd.SilenceErrors = true + + if err := cmd.ExecuteContext(ctx); err != nil { + if viper.GetBool(TraceFlag) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf) + } else { + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + } - . 
<(%s completion) -`, rootCmd.Use, rootCmd.Use), - RunE: func(cmd *cobra.Command, _ []string) error { - zsh, err := cmd.Flags().GetBool(flagZsh) - if err != nil { - return err - } - if zsh { - return rootCmd.GenZshCompletion(cmd.OutOrStdout()) - } - return rootCmd.GenBashCompletion(cmd.OutOrStdout()) - }, - Hidden: hidden, - Args: cobra.NoArgs, + return err } - - cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script") - - return cmd + return nil } diff --git a/libs/cli/setup.go b/libs/cli/setup.go index be69c30af..54ea90358 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -1,11 +1,8 @@ package cli import ( - "context" - "fmt" "os" "path/filepath" - "runtime" "strings" "github.com/spf13/cobra" @@ -13,52 +10,27 @@ import ( ) const ( - HomeFlag = "home" - TraceFlag = "trace" - OutputFlag = "output" - EncodingFlag = "encoding" + HomeFlag = "home" + TraceFlag = "trace" + OutputFlag = "output" // used in the cli ) -// Executable is the minimal interface to *corba.Command, so we can -// wrap if desired before the test -type Executable interface { - Execute() error - Context() context.Context -} - // PrepareBaseCmd is meant for tendermint and other servers -func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { +func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) *cobra.Command { + // the primary caller of this command is in the SDK and + // returning the cobra.Command object avoids breaking that + // code. In the long term, the SDK could avoid this entirely. cobra.OnInitialize(func() { InitEnv(envPrefix) }) cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") cmd.PersistentPreRunE = concatCobraCmdFuncs(BindFlagsLoadViper, cmd.PersistentPreRunE) - return Executor{cmd, os.Exit} -} - -// PrepareMainCmd is meant for client side libs that want some more flags -// -// This adds --encoding (hex, btc, base64) and --output (text, json) to -// the command. These only really make sense in interactive commands. -func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { - cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") - cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") - cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE) - return PrepareBaseCmd(cmd, envPrefix, defaultHome) + return cmd } // InitEnv sets to use ENV variables if set. func InitEnv(prefix string) { - copyEnvVars(prefix) - - // env variables with TM prefix (eg. TM_ROOT) - viper.SetEnvPrefix(prefix) - viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) - viper.AutomaticEnv() -} - -// This copies all variables like TMROOT to TM_ROOT, -// so we can support both formats for the user -func copyEnvVars(prefix string) { + // This copies all variables like TMROOT to TM_ROOT, + // so we can support both formats for the user prefix = strings.ToUpper(prefix) ps := prefix + "_" for _, e := range os.Environ() { @@ -71,42 +43,11 @@ func copyEnvVars(prefix string) { } } } -} - -// Executor wraps the cobra Command with a nicer Execute method -type Executor struct { - *cobra.Command - Exit func(int) // this is os.Exit by default, override in tests -} -type ExitCoder interface { - ExitCode() int -} - -// execute adds all child commands to the root command sets flags appropriately. -// This is called by main.main(). 
It only needs to happen once to the rootCmd. -func (e Executor) Execute() error { - e.SilenceUsage = true - e.SilenceErrors = true - err := e.Command.Execute() - if err != nil { - if viper.GetBool(TraceFlag) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf) - } else { - fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) - } - - // return error code 1 by default, can override it with a special error type - exitCode := 1 - if ec, ok := err.(ExitCoder); ok { - exitCode = ec.ExitCode() - } - e.Exit(exitCode) - } - return err + // env variables with TM prefix (eg. TM_ROOT) + viper.SetEnvPrefix(prefix) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + viper.AutomaticEnv() } type cobraCmdFunc func(cmd *cobra.Command, args []string) error @@ -149,14 +90,3 @@ func BindFlagsLoadViper(cmd *cobra.Command, args []string) error { } return nil } - -func validateOutput(cmd *cobra.Command, args []string) error { - // validate output format - output := viper.GetString(OutputFlag) - switch output { - case "text", "json": - default: - return fmt.Errorf("unsupported output format: %s", output) - } - return nil -} diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index bc62481af..9198485ef 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -1,8 +1,12 @@ package cli import ( + "bytes" + "context" "fmt" + "io" "os" + "path/filepath" "strconv" "strings" "testing" @@ -14,6 +18,9 @@ import ( ) func TestSetupEnv(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { args []string env map[string]string @@ -44,29 +51,36 @@ func TestSetupEnv(t *testing.T) { } demo.Flags().String("foobar", "", "Some test value from config") cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := RunWithArgs(cmd, args, tc.env) + err := RunWithArgs(ctx, cmd, args, tc.env) require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) } } -func tempDir(t *testing.T) string { - t.Helper() - cdir, err := os.MkdirTemp("", "test-cli") - require.NoError(t, err) - return cdir +// writeConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func writeConfigVals(dir string, vals map[string]string) error { + lines := make([]string, 0, len(vals)) + for k, v := range vals { + lines = append(lines, fmt.Sprintf("%s = %q", k, v)) + } + data := strings.Join(lines, "\n") + cfile := filepath.Join(dir, "config.toml") + return os.WriteFile(cfile, []byte(data), 0600) } func TestSetupConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // we pre-create two config files we can refer to in the rest of // the test cases. cval1 := "fubble" - conf1 := tempDir(t) - err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) + conf1 := t.TempDir() + err := writeConfigVals(conf1, map[string]string{"boo": cval1}) require.NoError(t, err) cases := []struct { @@ -103,11 +117,10 @@ func TestSetupConfig(t *testing.T) { boo.Flags().String("boo", "", "Some test value from config") boo.Flags().String("two-words", "", "Check out env handling -") cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
- err := RunWithArgs(cmd, args, tc.env) + err := RunWithArgs(ctx, cmd, args, tc.env) require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) assert.Equal(t, tc.expectedTwo, two, i) @@ -121,15 +134,18 @@ type DemoConfig struct { } func TestSetupUnmarshal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "someone", "else" - conf1 := tempDir(t) - err := WriteConfigVals(conf1, map[string]string{"name": cval1}) + conf1 := t.TempDir() + err := writeConfigVals(conf1, map[string]string{"name": cval1}) require.NoError(t, err) // even with some ignored fields, should be no problem - conf2 := tempDir(t) - err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) + conf2 := t.TempDir() + err = writeConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) require.NoError(t, err) // unused is not declared on a flag and remains from base @@ -182,17 +198,19 @@ func TestSetupUnmarshal(t *testing.T) { // from the default config here marsh.Flags().Int("age", base.Age, "Some test value from config") cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := RunWithArgs(cmd, args, tc.env) + err := RunWithArgs(ctx, cmd, args, tc.env) require.NoError(t, err, i) assert.Equal(t, tc.expected, cfg, i) } } func TestSetupTrace(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { args []string env map[string]string @@ -215,18 +233,16 @@ func TestSetupTrace(t *testing.T) { }, } cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) + stdout, stderr, err := runCaptureWithArgs(ctx, cmd, args, tc.env) require.Error(t, err, i) require.Equal(t, "", stdout, i) require.NotEqual(t, "", stderr, i) msg := strings.Split(stderr, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) - assert.Equal(t, desired, msg[0], i) - t.Log(msg) + assert.Equal(t, desired, msg[0], i, msg) if tc.long && assert.True(t, len(msg) > 2, i) { // the next line starts the stack trace... assert.Contains(t, stderr, "TestSetupTrace", i) @@ -234,3 +250,44 @@ func TestSetupTrace(t *testing.T) { } } } + +// runCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. 
It returns string fields +// representing output written to stdout and stderr, additionally any error +// from cmd.Execute() is also returned +func runCaptureWithArgs(ctx context.Context, cmd *cobra.Command, args []string, env map[string]string) (stdout, stderr string, err error) { + oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout + rOut, wOut, _ := os.Pipe() + rErr, wErr, _ := os.Pipe() + os.Stdout, os.Stderr = wOut, wErr + defer func() { + os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout + }() + + // copy the output in a separate goroutine so printing can't block indefinitely + copyStd := func(reader *os.File) *(chan string) { + stdC := make(chan string) + go func() { + var buf bytes.Buffer + // io.Copy will end when we call reader.Close() below + io.Copy(&buf, reader) //nolint:errcheck //ignore error + select { + case <-cmd.Context().Done(): + case stdC <- buf.String(): + } + }() + return &stdC + } + outC := copyStd(rOut) + errC := copyStd(rErr) + + // now run the command + err = RunWithArgs(ctx, cmd, args, env) + + // and grab the stdout to return + wOut.Close() + wErr.Close() + stdout = <-*outC + stderr = <-*errC + return stdout, stderr, err +} diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go deleted file mode 100644 index 41633cbef..000000000 --- a/libs/events/event_cache.go +++ /dev/null @@ -1,39 +0,0 @@ -package events - -import "context" - -// An EventCache buffers events for a Fireable -// All events are cached. Filtering happens on Flush -type EventCache struct { - evsw Fireable - events []eventInfo -} - -// Create a new EventCache with an EventSwitch as backend -func NewEventCache(evsw Fireable) *EventCache { - return &EventCache{ - evsw: evsw, - } -} - -// a cached event -type eventInfo struct { - event string - data EventData -} - -// Cache an event to be fired upon finality. -func (evc *EventCache) FireEvent(event string, data EventData) { - // append to list (go will grow our backing array exponentially) - evc.events = append(evc.events, eventInfo{event, data}) -} - -// Fire events by running evsw.FireEvent on all cached events. Blocks. 
-// Clears cached events -func (evc *EventCache) Flush(ctx context.Context) { - for _, ei := range evc.events { - evc.evsw.FireEvent(ctx, ei.event, ei.data) - } - // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation - evc.events = nil -} diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go deleted file mode 100644 index fb36fa674..000000000 --- a/libs/events/event_cache_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package events - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/log" -) - -func TestEventCache_Flush(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - logger := log.NewTestingLogger(t) - evsw := NewEventSwitch(logger) - err := evsw.Start(ctx) - require.NoError(t, err) - - err = evsw.AddListenerForEvent("nothingness", "", func(_ context.Context, data EventData) error { - // Check we are not initializing an empty buffer full of zeroed eventInfos in the EventCache - require.FailNow(t, "We should never receive a message on this switch since none are fired") - return nil - }) - require.NoError(t, err) - - evc := NewEventCache(evsw) - evc.Flush(ctx) - // Check after reset - evc.Flush(ctx) - fail := true - pass := false - err = evsw.AddListenerForEvent("somethingness", "something", func(_ context.Context, data EventData) error { - if fail { - require.FailNow(t, "Shouldn't see a message until flushed") - } - pass = true - return nil - }) - require.NoError(t, err) - - evc.FireEvent("something", struct{ int }{1}) - evc.FireEvent("something", struct{ int }{2}) - evc.FireEvent("something", struct{ int }{3}) - fail = false - evc.Flush(ctx) - assert.True(t, pass) -} diff --git a/libs/log/default_test.go b/libs/log/default_test.go index 5e8e18810..6ea723c51 100644 --- a/libs/log/default_test.go +++ b/libs/log/default_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/libs/os/os_test.go b/libs/os/os_test.go index fe503f921..ca7050156 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -8,11 +8,12 @@ import ( "testing" "github.com/stretchr/testify/require" + tmos "github.com/tendermint/tendermint/libs/os" ) func TestCopyFile(t *testing.T) { - tmpfile, err := os.CreateTemp("", "example") + tmpfile, err := os.CreateTemp(t.TempDir(), "example") if err != nil { t.Fatal(err) } @@ -40,12 +41,10 @@ func TestCopyFile(t *testing.T) { } func TestEnsureDir(t *testing.T) { - tmp, err := os.MkdirTemp("", "ensure-dir") - require.NoError(t, err) - defer os.RemoveAll(tmp) + tmp := t.TempDir() // Should be possible to create a new directory. - err = tmos.EnsureDir(filepath.Join(tmp, "dir"), 0755) + err := tmos.EnsureDir(filepath.Join(tmp, "dir"), 0755) require.NoError(t, err) require.DirExists(t, filepath.Join(tmp, "dir")) @@ -76,11 +75,7 @@ func TestEnsureDir(t *testing.T) { // the origin is positively a non-directory and that it is ready for copying. 
// See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpDir) + tmpDir := t.TempDir() originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") diff --git a/libs/service/service.go b/libs/service/service.go index b36aa1087..daeead03e 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -3,7 +3,7 @@ package service import ( "context" "errors" - "sync/atomic" + "sync" "github.com/tendermint/tendermint/libs/log" ) @@ -30,9 +30,6 @@ type Service interface { // Return true if the service is running IsRunning() bool - // String representation of the service - String() string - // Wait blocks until the service is stopped. Wait() } @@ -40,8 +37,6 @@ type Service interface { // Implementation describes the implementation that the // BaseService implementation wraps. type Implementation interface { - Service - // Called by the Services Start Method OnStart(context.Context) error @@ -57,12 +52,7 @@ Users can override the OnStart/OnStop methods. In the absence of errors, these methods are guaranteed to be called at most once. If OnStart returns an error, service won't be marked as started, so the user can call Start again. -A call to Reset will panic, unless OnReset is overwritten, allowing -OnStart/OnStop to be called again. - -The caller must ensure that Start and Stop are not called concurrently. - -It is ok to call Stop without calling Start first. +It is safe, but an error, to call Stop without calling Start first. Typical usage: @@ -80,23 +70,21 @@ Typical usage: } func (fs *FooService) OnStart(ctx context.Context) error { - fs.BaseService.OnStart() // Always call the overridden method. // initialize private fields // start subroutines, etc. } func (fs *FooService) OnStop() error { - fs.BaseService.OnStop() // Always call the overridden method. // close/destroy private fields // stop subroutines, etc. } */ type BaseService struct { - logger log.Logger - name string - started uint32 // atomic - stopped uint32 // atomic - quit chan struct{} + logger log.Logger + name string + mtx sync.Mutex + quit <-chan (struct{}) + cancel context.CancelFunc // The "subclass" of BaseService impl Implementation @@ -107,7 +95,6 @@ func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseSe return &BaseService{ logger: logger, name: name, - quit: make(chan struct{}), impl: impl, } } @@ -116,83 +103,101 @@ func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseSe // returned if the service is already running or stopped. To restart a // stopped service, call Reset. 
func (bs *BaseService) Start(ctx context.Context) error { - if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { - if atomic.LoadUint32(&bs.stopped) == 1 { - bs.logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) - atomic.StoreUint32(&bs.started, 0) - return ErrAlreadyStopped - } + bs.mtx.Lock() + defer bs.mtx.Unlock() - bs.logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) + if bs.quit != nil { + return ErrAlreadyStarted + } + select { + case <-bs.quit: + return ErrAlreadyStopped + default: + bs.logger.Info("starting service", "service", bs.name, "impl", bs.name) if err := bs.impl.OnStart(ctx); err != nil { - // revert flag - atomic.StoreUint32(&bs.started, 0) return err } + // we need a separate context to ensure that we start + // a thread that will get cleaned up and that the + // Stop/Wait functions work as expected. + srvCtx, cancel := context.WithCancel(context.Background()) + bs.cancel = cancel + bs.quit = srvCtx.Done() + go func(ctx context.Context) { select { - case <-bs.quit: - // someone else explicitly called stop - // and then we shouldn't. + case <-srvCtx.Done(): + // this means stop was called manually return case <-ctx.Done(): - // if nothing is running, no need to - // shut down again. - if !bs.impl.IsRunning() { - return - } - - // the context was cancel and we - // should stop. - if err := bs.Stop(); err != nil { - bs.logger.Error("stopped service", - "err", err.Error(), - "service", bs.name, - "impl", bs.impl.String()) - } - - bs.logger.Info("stopped service", - "service", bs.name, - "impl", bs.impl.String()) + _ = bs.Stop() } + + bs.logger.Info("stopped service", + "service", bs.name) }(ctx) return nil } - - return ErrAlreadyStarted } // Stop implements Service by calling OnStop (if defined) and closing quit // channel. An error will be returned if the service is already stopped. func (bs *BaseService) Stop() error { - if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - if atomic.LoadUint32(&bs.started) == 0 { - bs.logger.Error("not stopping service; not started yet", "service", bs.name, "impl", bs.impl.String()) - atomic.StoreUint32(&bs.stopped, 0) - return ErrNotStarted - } + bs.mtx.Lock() + defer bs.mtx.Unlock() + + if bs.quit == nil { + return ErrNotStarted + } - bs.logger.Info("stopping service", "service", bs.name, "impl", bs.impl.String()) + select { + case <-bs.quit: + return ErrAlreadyStopped + default: + bs.logger.Info("stopping service", "service", bs.name) bs.impl.OnStop() - close(bs.quit) + bs.cancel() return nil } - - return ErrAlreadyStopped } // IsRunning implements Service by returning true or false depending on the // service's state. func (bs *BaseService) IsRunning() bool { - return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 + bs.mtx.Lock() + defer bs.mtx.Unlock() + + if bs.quit == nil { + return false + } + + select { + case <-bs.quit: + return false + default: + return true + } +} + +func (bs *BaseService) getWait() <-chan struct{} { + bs.mtx.Lock() + defer bs.mtx.Unlock() + + if bs.quit == nil { + out := make(chan struct{}) + close(out) + return out + } + + return bs.quit } // Wait blocks until the service is stopped. -func (bs *BaseService) Wait() { <-bs.quit } +func (bs *BaseService) Wait() { <-bs.getWait() } // String implements Service by returning a string representation of the service. 
func (bs *BaseService) String() string { return bs.name } diff --git a/libs/service/service_test.go b/libs/service/service_test.go index fcc727fcc..9b4f84de9 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -2,45 +2,136 @@ package service import ( "context" + "sync" "testing" "time" + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) type testService struct { + started bool + stopped bool + multiStopped bool + mu sync.Mutex BaseService } -func (testService) OnStop() {} -func (testService) OnStart(context.Context) error { +func (t *testService) OnStop() { + t.mu.Lock() + defer t.mu.Unlock() + if t.stopped == true { + t.multiStopped = true + } + t.stopped = true +} +func (t *testService) OnStart(context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + + t.started = true return nil } -func TestBaseServiceWait(t *testing.T) { +func (t *testService) isStarted() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.started +} + +func (t *testService) isStopped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.stopped +} + +func (t *testService) isMultiStopped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.multiStopped +} + +func TestBaseService(t *testing.T) { + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - logger := log.NewTestingLogger(t) + logger := log.NewNopLogger() - ts := &testService{} - ts.BaseService = *NewBaseService(logger, "TestService", ts) - err := ts.Start(ctx) - require.NoError(t, err) + t.Run("Wait", func(t *testing.T) { + wctx, wcancel := context.WithCancel(ctx) + defer wcancel() + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + err := ts.Start(wctx) + require.NoError(t, err) + require.True(t, ts.isStarted()) - waitFinished := make(chan struct{}) - go func() { - ts.Wait() - waitFinished <- struct{}{} - }() + waitFinished := make(chan struct{}) + wcancel() + go func() { + ts.Wait() + close(waitFinished) + }() - go cancel() + select { + case <-waitFinished: + assert.True(t, ts.isStopped(), "failed to stop") + assert.False(t, ts.IsRunning(), "is not running") + + case <-time.After(100 * time.Millisecond): + t.Fatal("expected Wait() to finish within 100 ms.") + } + }) + t.Run("ManualStop", func(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + require.False(t, ts.IsRunning()) + require.False(t, ts.isStarted()) + require.NoError(t, ts.Start(ctx)) + + require.True(t, ts.isStarted()) + + require.NoError(t, ts.Stop()) + require.True(t, ts.isStopped()) + require.False(t, ts.IsRunning()) + }) + t.Run("MultiStop", func(t *testing.T) { + t.Run("SingleThreaded", func(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + + require.NoError(t, ts.Start(ctx)) + require.True(t, ts.isStarted()) + require.NoError(t, ts.Stop()) + require.True(t, ts.isStopped()) + require.False(t, ts.isMultiStopped()) + require.Error(t, ts.Stop()) + require.False(t, ts.isMultiStopped()) + }) + t.Run("MultiThreaded", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + + require.NoError(t, ts.Start(ctx)) + require.True(t, ts.isStarted()) + + go func() { _ = ts.Stop() }() + go cancel() + + ts.Wait() + + require.True(t, ts.isStopped()) + require.False(t, 
ts.isMultiStopped()) + }) + + }) - select { - case <-waitFinished: - // all good - case <-time.After(100 * time.Millisecond): - t.Fatal("expected Wait() to finish within 100 ms.") - } } diff --git a/libs/strings/string.go b/libs/strings/string.go index 6cc0b18ee..95ea03b5a 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -28,54 +28,12 @@ func SplitAndTrimEmpty(s, sep, cutset string) []string { return nonEmptyStrings } -// StringInSlice returns true if a is found the list. -func StringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// SplitAndTrim slices s into all subslices separated by sep and returns a -// slice of the string s with all leading and trailing Unicode code points -// contained in cutset removed. If sep is empty, SplitAndTrim splits after each -// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of -// -1. -func SplitAndTrim(s, sep, cutset string) []string { - if s == "" { - return []string{} - } - - spl := strings.Split(s, sep) - for i := 0; i < len(spl); i++ { - spl[i] = strings.Trim(spl[i], cutset) - } - return spl -} - -// TrimSpace removes all leading and trailing whitespace from the -// string. -func TrimSpace(s string) string { return strings.TrimSpace(s) } - -// Returns true if s is a non-empty printable non-tab ascii character. -func IsASCIIText(s string) bool { +// ASCIITrim removes spaces from an a ASCII string, erroring if the +// sequence is not an ASCII string. +func ASCIITrim(s string) (string, error) { if len(s) == 0 { - return false - } - for _, b := range []byte(s) { - if 32 <= b && b <= 126 { - // good - } else { - return false - } + return "", nil } - return true -} - -// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics. 
-func ASCIITrim(s string) string { r := make([]byte, 0, len(s)) for _, b := range []byte(s) { switch { @@ -84,10 +42,10 @@ func ASCIITrim(s string) string { case 32 < b && b <= 126: r = append(r, b) default: - panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b)) + return "", fmt.Errorf("non-ASCII (non-tab) char 0x%X", b) } } - return string(r) + return string(r), nil } // StringSliceEqual checks if string slices a and b are equal diff --git a/libs/strings/string_test.go b/libs/strings/string_test.go index c56116393..79caf5901 100644 --- a/libs/strings/string_test.go +++ b/libs/strings/string_test.go @@ -25,34 +25,48 @@ func TestSplitAndTrimEmpty(t *testing.T) { } } -func TestStringInSlice(t *testing.T) { - require.True(t, StringInSlice("a", []string{"a", "b", "c"})) - require.False(t, StringInSlice("d", []string{"a", "b", "c"})) - require.True(t, StringInSlice("", []string{""})) - require.False(t, StringInSlice("", []string{})) -} - -func TestIsASCIIText(t *testing.T) { - notASCIIText := []string{ - "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", - } - for _, v := range notASCIIText { - require.False(t, IsASCIIText(v), "%q is not ascii-text", v) - } - asciiText := []string{ - " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", - } - for _, v := range asciiText { - require.True(t, IsASCIIText(v), "%q is ascii-text", v) - } +func assertCorrectTrim(t *testing.T, input, expected string) { + t.Helper() + output, err := ASCIITrim(input) + require.NoError(t, err) + require.Equal(t, expected, output) } func TestASCIITrim(t *testing.T) { - require.Equal(t, ASCIITrim(" "), "") - require.Equal(t, ASCIITrim(" a"), "a") - require.Equal(t, ASCIITrim("a "), "a") - require.Equal(t, ASCIITrim(" a "), "a") - require.Panics(t, func() { ASCIITrim("\xC2\xA2") }) + t.Run("Validation", func(t *testing.T) { + t.Run("NonASCII", func(t *testing.T) { + notASCIIText := []string{ + "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", + } + for _, v := range notASCIIText { + _, err := ASCIITrim(v) + require.Error(t, err, "%q is not ascii-text", v) + } + }) + t.Run("EmptyString", func(t *testing.T) { + out, err := ASCIITrim("") + require.NoError(t, err) + require.Zero(t, out) + }) + t.Run("ASCIIText", func(t *testing.T) { + asciiText := []string{ + " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", + } + for _, v := range asciiText { + _, err := ASCIITrim(v) + require.NoError(t, err, "%q is ascii-text", v) + } + }) + _, err := ASCIITrim("\xC2\xA2") + require.Error(t, err) + }) + t.Run("Trimming", func(t *testing.T) { + assertCorrectTrim(t, " ", "") + assertCorrectTrim(t, " a", "a") + assertCorrectTrim(t, "a ", "a") + assertCorrectTrim(t, " a ", "a") + }) + } func TestStringSliceEqual(t *testing.T) { diff --git a/light/client.go b/light/client.go index 92f713b0d..dfb4fccf7 100644 --- a/light/client.go +++ b/light/client.go @@ -444,7 +444,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // headers are not adjacent, verifySkipping is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. // Intermediate headers are not saved to database. 
-// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md +// https://github.com/tendermint/tendermint/blob/master/spec/light-client/README.md // // If the header, which is older than the currently trusted header, is // requested and the light client does not have it, VerifyHeader will perform: diff --git a/light/doc.go b/light/doc.go index c30c68eb0..b05ffa805 100644 --- a/light/doc.go +++ b/light/doc.go @@ -94,7 +94,7 @@ Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. See -https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md +https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md for details. There are two methods of verification: sequential and bisection @@ -118,7 +118,7 @@ as a wrapper, which verifies all the headers, using a light client connected to some other node. See -https://github.com/tendermint/spec/tree/master/spec/light-client +https://github.com/tendermint/tendermint/tree/master/spec/light-client for the light client specification. */ package light diff --git a/light/example_test.go b/light/example_test.go index 7362ca06b..c735c21a2 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,8 +2,7 @@ package light_test import ( "context" - stdlog "log" - "os" + "testing" "time" dbm "github.com/tendermint/tm-db" @@ -17,17 +16,17 @@ import ( ) // Manually getting light blocks and verifying them. -func ExampleClient() { +func TestExampleClient(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + conf, err := rpctest.CreateConfig(t, "ExampleClient_VerifyLightBlockAtHeight") if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // Start a test application @@ -35,21 +34,16 @@ func ExampleClient() { _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } defer func() { _ = closer(ctx) }() - dbDir, err := os.MkdirTemp("", "light-client-example") - if err != nil { - stdlog.Fatal(err) - } - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // give Tendermint time to generate some blocks @@ -57,12 +51,12 @@ func ExampleClient() { block, err := primary.LightBlock(ctx, 2) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } db, err := dbm.NewGoLevelDB("light-client-db", dbDir) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } c, err := light.NewClient(ctx, @@ -78,11 +72,11 @@ func ExampleClient() { light.Logger(logger), ) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } defer func() { if err := c.Cleanup(); err != nil { - stdlog.Fatal(err) + t.Fatal(err) } }() @@ -92,19 +86,19 @@ func ExampleClient() { // veify the block at height 3 _, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now()) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // retrieve light block at height 3 _, err = c.TrustedLightBlock(3) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // update to the latest height lb, err := c.Update(ctx, time.Now()) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } logger.Info("verified 
light block", "light-block", lb) diff --git a/light/helpers_test.go b/light/helpers_test.go index 9f6147526..37b4b5bf3 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" diff --git a/light/light_test.go b/light/light_test.go index 7e8977de9..35e6d1933 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "os" "testing" "time" @@ -28,7 +27,7 @@ func TestClientIntegration_Update(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) logger := log.NewTestingLogger(t) @@ -42,10 +41,7 @@ func TestClientIntegration_Update(t *testing.T) { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := os.MkdirTemp("", "light-client-test-update-example") - require.NoError(t, err) - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) @@ -91,7 +87,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) logger := log.NewTestingLogger(t) @@ -103,10 +99,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - dbDir, err := os.MkdirTemp("", "light-client-test-verify-example") - require.NoError(t, err) - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) @@ -171,7 +164,7 @@ func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*type func TestClientStatusRPC(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) // Start a test application @@ -181,10 +174,7 @@ func TestClientStatusRPC(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - dbDir, err := os.MkdirTemp("", "light-client-test-status-example") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(dbDir) }) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index 4c7761d50..cb443caaf 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -35,7 +35,7 @@ func TestNewProvider(t *testing.T) { func TestProvider(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := rpctest.CreateConfig(t.Name()) + cfg, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) // start a tendermint node in the background to test against diff --git a/light/rpc/client.go b/light/rpc/client.go index cfbdaa409..f7f741843 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -446,27 +446,19 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.Re 
return nil, err } - // proto-encode BeginBlock events - bbeBytes, err := proto.Marshal(&abci.ResponseBeginBlock{ - Events: res.BeginBlockEvents, + // proto-encode FinalizeBlock events + bbeBytes, err := proto.Marshal(&abci.ResponseFinalizeBlock{ + Events: res.FinalizeBlockEvents, }) if err != nil { return nil, err } - // Build a Merkle tree of proto-encoded DeliverTx results and get a hash. + // Build a Merkle tree of proto-encoded FinalizeBlock tx results and get a hash. results := types.NewResults(res.TxsResults) - // proto-encode EndBlock events. - ebeBytes, err := proto.Marshal(&abci.ResponseEndBlock{ - Events: res.EndBlockEvents, - }) - if err != nil { - return nil, err - } - // Build a Merkle tree out of the above 3 binary slices. - rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash(), ebeBytes}) + rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash()}) // Verify block results. if !bytes.Equal(rH, trustedBlock.LastResultsHash) { diff --git a/node/node.go b/node/node.go index 4a900368d..62cc2c74f 100644 --- a/node/node.go +++ b/node/node.go @@ -7,10 +7,12 @@ import ( "net" "net/http" "strconv" + "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" @@ -29,7 +31,6 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -550,7 +551,6 @@ func (n *nodeImpl) OnStart(ctx context.Context) error { // OnStop stops the Node. It implements service.Service. 
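The light/rpc hunk above changes how the client recomputes a block's `LastResultsHash`: instead of hashing three byte slices (proto-encoded BeginBlock events, the DeliverTx results root, and proto-encoded EndBlock events), it now hashes two, the proto-encoded FinalizeBlock events and the transaction results root. Below is a minimal sketch of that recomputation using only the calls that appear in the hunk; the shape of `ResponseFinalizeBlock` is taken from the hunk as it stands at this commit and may still change.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/gogo/protobuf/proto"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/rpc/coretypes"
	"github.com/tendermint/tendermint/types"
)

// expectedLastResultsHash mirrors the updated check in light/rpc/client.go:
// hash the proto-encoded FinalizeBlock events together with the Merkle root
// of the per-transaction results.
func expectedLastResultsHash(res *coretypes.ResultBlockResults) ([]byte, error) {
	events, err := proto.Marshal(&abci.ResponseFinalizeBlock{
		Events: res.FinalizeBlockEvents,
	})
	if err != nil {
		return nil, err
	}
	results := types.NewResults(res.TxsResults)
	return merkle.HashFromByteSlices([][]byte{events, results.Hash()}), nil
}

// verifyBlockResults reports whether a fetched block-results response is
// consistent with the LastResultsHash taken from a trusted header.
func verifyBlockResults(res *coretypes.ResultBlockResults, lastResultsHash []byte) (bool, error) {
	h, err := expectedLastResultsHash(res)
	if err != nil {
		return false, err
	}
	return bytes.Equal(h, lastResultsHash), nil
}

func main() {
	h, err := expectedLastResultsHash(&coretypes.ResultBlockResults{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("expected LastResultsHash for an empty block: %X\n", h)
}
```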
func (n *nodeImpl) OnStop() { n.logger.Info("Stopping Node") - for _, es := range n.eventSinks { if err := es.Stop(); err != nil { n.logger.Error("failed to stop event sink", "err", err) diff --git a/node/node_test.go b/node/node_test.go index eafc5ebdd..a9ab13f8c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -39,7 +39,7 @@ import ( ) func TestNodeStartStop(t *testing.T) { - cfg, err := config.ResetTestRoot("node_node_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_node_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -55,11 +55,10 @@ func TestNodeStartStop(t *testing.T) { n, ok := ns.(*nodeImpl) require.True(t, ok) t.Cleanup(func() { - if n.IsRunning() { - bcancel() - n.Wait() - } + bcancel() + n.Wait() }) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) require.NoError(t, n.Start(ctx)) // wait for the node to produce a block @@ -98,13 +97,14 @@ func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger ns.Wait() } }) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) return n } func TestNodeDelayedStart(t *testing.T) { - cfg, err := config.ResetTestRoot("node_delayed_start_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_delayed_start_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -126,7 +126,7 @@ func TestNodeDelayedStart(t *testing.T) { } func TestNodeSetAppVersion(t *testing.T) { - cfg, err := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_app_version_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -159,7 +159,7 @@ func TestNodeSetPrivValTCP(t *testing.T) { logger := log.NewNopLogger() - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addr @@ -196,7 +196,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { addrNoPrefix := testFreeAddr(t) - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix @@ -220,7 +220,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = "unix://" + tmpfile @@ -267,7 +267,7 @@ func TestCreateProposalBlock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -340,6 +340,7 @@ func TestCreateProposalBlock(t *testing.T) { height, state, commit, proposerAddr, + nil, ) require.NoError(t, err) @@ -364,7 +365,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -415,6 +416,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { height, state, commit, proposerAddr, + nil, ) require.NoError(t, 
err) @@ -432,7 +434,7 @@ func TestMaxProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -526,6 +528,7 @@ func TestMaxProposalBlockSize(t *testing.T) { math.MaxInt64, state, commit, proposerAddr, + nil, ) require.NoError(t, err) @@ -547,7 +550,7 @@ func TestMaxProposalBlockSize(t *testing.T) { } func TestNodeNewSeedNode(t *testing.T) { - cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_new_node_custom_reactors_test") require.NoError(t, err) cfg.Mode = config.ModeSeed defer os.RemoveAll(cfg.RootDir) @@ -568,6 +571,7 @@ func TestNodeNewSeedNode(t *testing.T) { logger, ) t.Cleanup(ns.Wait) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) require.NoError(t, err) n, ok := ns.(*seedNodeImpl) @@ -584,7 +588,7 @@ func TestNodeNewSeedNode(t *testing.T) { } func TestNodeSetEventSink(t *testing.T) { - cfg, err := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_app_version_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -724,7 +728,7 @@ func loadStatefromGenesis(ctx context.Context, t *testing.T) sm.State { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - cfg, err := config.ResetTestRoot("load_state_from_genesis") + cfg, err := config.ResetTestRoot(t.TempDir(), "load_state_from_genesis") require.NoError(t, err) loadedState, err := stateStore.Load() diff --git a/node/seed.go b/node/seed.go index ef3e61df0..970896cc6 100644 --- a/node/seed.go +++ b/node/seed.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "strings" "time" "github.com/tendermint/tendermint/config" @@ -13,7 +14,6 @@ import ( sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" ) diff --git a/privval/file_test.go b/privval/file_test.go index 9e0c3d691..91c2e2a9b 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -21,9 +21,9 @@ import ( ) func TestGenLoadValidator(t *testing.T) { - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -44,9 +44,9 @@ func TestResetValidator(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -74,9 +74,9 @@ func TestResetValidator(t *testing.T) { } func TestLoadOrGenValidator(t *testing.T) { - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := 
os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) tempKeyFilePath := tempKeyFile.Name() @@ -160,9 +160,9 @@ func TestSignVote(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -215,9 +215,9 @@ func TestSignProposal(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -263,9 +263,9 @@ func TestDifferByTimestamp(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 86bf4bb2b..ac7608274 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 0361139da..a3ea6c532 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -10,13 +10,14 @@ import ( grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + tmnet "github.com/tendermint/tendermint/libs/net" ) // DefaultDialOptions constructs a list of grpc dial options diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index 1ff738cbb..7ec8fe30f 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -23,7 +23,7 @@ func getFreeLocalhostAddrPort(t *testing.T) string { func getDialerTestCases(t *testing.T) []dialerTestCase { tcpAddr := getFreeLocalhostAddrPort(t) - unixFilePath, err := testUnixAddr() + unixFilePath, err := testUnixAddr(t) require.NoError(t, err) unixAddr := 
fmt.Sprintf("unix://%s", unixFilePath) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index c411332b2..e91d111d0 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" ) @@ -28,14 +29,17 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket -func testUnixAddr() (string, error) { - f, err := os.CreateTemp("", "tendermint-privval-test-*") +func testUnixAddr(t *testing.T) (string, error) { + // N.B. We can't use t.TempDir here because socket filenames have a + // restrictive length limit (~100 bytes) for silly historical reasons. + f, err := os.CreateTemp("", "tendermint-privval-test-*.sock") if err != nil { return "", err } addr := f.Name() f.Close() - os.Remove(addr) + os.Remove(addr) // remove so the test can bind it + t.Cleanup(func() { os.Remove(addr) }) // clean up after the test return addr, nil } @@ -56,7 +60,7 @@ func tcpListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dura } func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase { - addr, err := testUnixAddr() + addr, err := testUnixAddr(t) if err != nil { t.Fatal(err) } diff --git a/proto/README.md b/proto/README.md index ebecd82d1..07d61d62c 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,23 +1,21 @@ # Protocol Buffers -This sections defines the types and messages shared across implementations. The definition of the data structures are located in the [core/data_structures](../spec/core/data_structures.md) for the core data types and ABCI definitions are located in the [ABCI](../spec/abci/README.md) section. +This sections defines the protocol buffers used in Tendermint. This is split into two directories: `spec`, the types required for all implementations and `tendermint`, a set of types internal to the Go implementation. All generated go code is also stored in `tendermint`. +More descriptions of the data structures are located in the spec directory as follows: -## Process of Updates +- [Block](../spec/core/data_structures.md) +- [ABCI](../spec/abci/README.md) +- [P2P](../spec/p2p/messages/README.md) + +## Process to generate protos The `.proto` files within this section are core to the protocol and updates must be treated as such. ### Steps 1. Make an issue with the proposed change. - - Within in the issue members from both the Tendermint-go and Tendermint-rs team will leave comments. If there is not consensus on the change an [RFC](../rfc/README.md) may be requested. + - Within the issue members, from the Tendermint team will leave comments. If there is not consensus on the change an [RFC](../rfc/README.md) may be requested. 1a. Submission of an RFC as a pull request should be made to facilitate further discussion. 1b. Merge the RFC. 2. Make the necessary changes to the `.proto` file(s), [core data structures](../spec/core/data_structures.md) and/or [ABCI protocol](../spec/abci/apps.md). -3. Open issues within Tendermint-go and Tendermint-rs repos. This is used to notify the teams that a change occurred in the spec. - 1. Tag the issue with a spec version label. This will notify the team the changed has been made on master but has not entered a release. - -### Versioning - -The spec repo aims to be versioned. Once it has been versioned, updates to the protobuf files will live on master. 
After a certain amount of time, decided on by Tendermint-go and Tendermint-rs team leads, a release will be made on the spec repo. The spec may contain minor releases as well, depending on the implementation these changes may lead to a breaking change. If so, the implementation team should open an issue within the spec repo requiring a major release of the spec. - -If the steps above were followed each implementation should have issues tagged with a spec change label. Once all issues have been completed the team should signify their readiness for release. +3. Rebuild the Go protocol buffers by running `make proto-gen`. Ensure that the project builds correctly by running `make build`. diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 35e4c3c56..129c96fe2 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -21,26 +21,26 @@ import "gogoproto/gogo.proto"; message Request { oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6 [deprecated = true]; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8 [deprecated = true]; - RequestEndBlock end_block = 9 [deprecated = true]; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; - RequestPrepareProposal prepare_proposal = 15; - RequestProcessProposal process_proposal = 16; - RequestFinalizeBlock finalize_block = 19; + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 4; + RequestQuery query = 5; + RequestBeginBlock begin_block = 6 [deprecated = true]; + RequestCheckTx check_tx = 7; + RequestDeliverTx deliver_tx = 8 [deprecated = true]; + RequestEndBlock end_block = 9 [deprecated = true]; + RequestCommit commit = 10; + RequestListSnapshots list_snapshots = 11; + RequestOfferSnapshot offer_snapshot = 12; + RequestLoadSnapshotChunk load_snapshot_chunk = 13; + RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestPrepareProposal prepare_proposal = 15; + RequestProcessProposal process_proposal = 16; + RequestFinalizeBlock finalize_block = 19; } - reserved 17; // Placeholder for RequestExtendVote in v0.37 - reserved 18; // Placeholder for RequestVerifyVoteExtension in v0.37 + reserved 17; // Placeholder for RequestExtendVote in v0.37 + reserved 18; // Placeholder for RequestVerifyVoteExtension in v0.37 } message RequestEcho { @@ -123,15 +123,15 @@ message RequestApplySnapshotChunk { } message RequestPrepareProposal { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; // txs is an array of transactions that will be included in a block, // sent to the app for possible modifications. - repeated bytes txs = 3; - LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; + repeated bytes txs = 3; + LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false]; // the modified transactions cannot exceed this size. 
- int64 max_tx_bytes = 6; + int64 max_tx_bytes = 6; } message RequestProcessProposal { @@ -155,27 +155,27 @@ message RequestFinalizeBlock { message Response { oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7 [deprecated = true]; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9 [deprecated = true]; - ResponseEndBlock end_block = 10 [deprecated = true]; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; - ResponsePrepareProposal prepare_proposal = 16; - ResponseProcessProposal process_proposal = 17; - ResponseFinalizeBlock finalize_block = 20; + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseInitChain init_chain = 5; + ResponseQuery query = 6; + ResponseBeginBlock begin_block = 7 [deprecated = true]; + ResponseCheckTx check_tx = 8; + ResponseDeliverTx deliver_tx = 9 [deprecated = true]; + ResponseEndBlock end_block = 10 [deprecated = true]; + ResponseCommit commit = 11; + ResponseListSnapshots list_snapshots = 12; + ResponseOfferSnapshot offer_snapshot = 13; + ResponseLoadSnapshotChunk load_snapshot_chunk = 14; + ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponsePrepareProposal prepare_proposal = 16; + ResponseProcessProposal process_proposal = 17; + ResponseFinalizeBlock finalize_block = 20; } - reserved 18; // Placeholder for ResponseExtendVote in v0.37 - reserved 19; // Placeholder for ResponseVerifyVoteExtension in v0.37 + reserved 18; // Placeholder for ResponseExtendVote in v0.37 + reserved 19; // Placeholder for ResponseVerifyVoteExtension in v0.37 } // nondeterministic @@ -308,7 +308,7 @@ message ResponsePrepareProposal { repeated ExecTxResult tx_results = 4; repeated ValidatorUpdate validator_updates = 5; tendermint.types.ConsensusParams consensus_param_updates = 6; - reserved 7; // Placeholder for app_signed_updates in v0.37 + reserved 7; // Placeholder for app_signed_updates in v0.37 } message ResponseProcessProposal { @@ -320,7 +320,7 @@ message ResponseProcessProposal { } message ResponseFinalizeBlock { - repeated Event block_events = 1 + repeated Event block_events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; repeated ExecTxResult tx_results = 2; repeated ValidatorUpdate validator_updates = 3; @@ -364,7 +364,7 @@ message ExecTxResult { int64 gas_used = 6; repeated Event tx_events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic - string codespace = 8; + string codespace = 8; } // TxResult contains results of executing the transaction. 
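The `max_tx_bytes` field above caps the total size of the transactions an application may hand back from `PrepareProposal` ("the modified transactions cannot exceed this size"). A small, self-contained sketch of the capping an application could apply to the raw txs it receives; the helper name below is hypothetical and not part of this patch.

```go
package main

import "fmt"

// capTxs keeps transactions, in order, while their combined size stays within
// maxTxBytes, mirroring the documented constraint on RequestPrepareProposal.
func capTxs(txs [][]byte, maxTxBytes int64) [][]byte {
	var total int64
	kept := make([][]byte, 0, len(txs))
	for _, tx := range txs {
		if total+int64(len(tx)) > maxTxBytes {
			break
		}
		total += int64(len(tx))
		kept = append(kept, tx)
	}
	return kept
}

func main() {
	txs := [][]byte{[]byte("aaaa"), []byte("bbbb"), []byte("cccc")}
	// Only the first two 4-byte transactions fit under an 8-byte budget.
	fmt.Println(len(capTxs(txs, 8))) // 2
}
```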
@@ -378,8 +378,8 @@ message TxResult { } message TxRecord { - TxAction action = 1; - bytes tx = 2; + TxAction action = 1; + bytes tx = 2; repeated bytes new_hashes = 3; // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal @@ -410,10 +410,10 @@ message ValidatorUpdate { // VoteInfo message VoteInfo { - Validator validator = 1 [(gogoproto.nullable) = false]; - bool signed_last_block = 2; - reserved 3; // Placeholder for tendermint_signed_extension in v0.37 - reserved 4; // Placeholder for app_signed_extension in v0.37 + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; + reserved 3; // Placeholder for tendermint_signed_extension in v0.37 + reserved 4; // Placeholder for app_signed_extension in v0.37 } enum EvidenceType { diff --git a/proto/tendermint/consensus/wal.proto b/proto/tendermint/consensus/wal.proto index 44afa2c0c..22531e0d0 100644 --- a/proto/tendermint/consensus/wal.proto +++ b/proto/tendermint/consensus/wal.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.consensus; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; - import "gogoproto/gogo.proto"; import "tendermint/consensus/types.proto"; import "tendermint/types/events.proto"; diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index 2c699e1cd..63e9afca7 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada..af5c64ecf 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -34,9 +34,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // of the various ABCI calls during block processing. // It is persisted to disk for each height before calling Commit. 
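A rough sketch of what persisting the slimmed-down `ABCIResponses` looks like now that a single `FinalizeBlock` field replaces the old `DeliverTxs`/`EndBlock`/`BeginBlock` trio. The `Marshal` method comes from the generated code shown below; the import paths and the database key are assumptions for illustration, not the state store's real keying scheme.

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
)

func main() {
	// One FinalizeBlock response now stands in for the old
	// DeliverTxs / EndBlock / BeginBlock trio.
	resps := &tmstate.ABCIResponses{
		FinalizeBlock: &abci.ResponseFinalizeBlock{},
	}

	bz, err := resps.Marshal()
	if err != nil {
		panic(err)
	}

	// Hypothetical key for illustration; the real state store has its own
	// keying scheme.
	db := dbm.NewMemDB()
	if err := db.Set([]byte("abciResponses/1"), bz); err != nil {
		panic(err)
	}
	fmt.Printf("persisted %d bytes of ABCI responses for height 1\n", len(bz))
}
```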
type ABCIResponses struct { - DeliverTxs []*types.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` - EndBlock *types.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` - BeginBlock *types.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` + FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` } func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } @@ -72,23 +70,9 @@ func (m *ABCIResponses) XXX_DiscardUnknown() { var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo -func (m *ABCIResponses) GetDeliverTxs() []*types.ResponseDeliverTx { +func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock { if m != nil { - return m.DeliverTxs - } - return nil -} - -func (m *ABCIResponses) GetEndBlock() *types.ResponseEndBlock { - if m != nil { - return m.EndBlock - } - return nil -} - -func (m *ABCIResponses) GetBeginBlock() *types.ResponseBeginBlock { - if m != nil { - return m.BeginBlock + return m.FinalizeBlock } return nil } @@ -422,55 +406,52 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 763 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34, - 0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03, - 0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff, - 0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10, - 0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c, - 0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08, - 0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d, - 0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84, - 0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9, - 0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70, - 0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43, - 0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0, - 0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5, - 0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80, - 0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd, - 0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb, - 0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba, - 0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89, - 0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6, - 0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e, - 0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 
0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f, - 0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c, - 0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69, - 0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41, - 0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8, - 0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7, - 0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6, - 0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71, - 0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07, - 0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89, - 0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc, - 0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56, - 0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9, - 0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9, - 0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35, - 0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57, - 0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83, - 0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd, - 0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f, - 0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72, - 0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8, - 0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07, - 0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab, - 0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e, - 0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4, - 0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00, + // 717 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0x8d, 0x5f, 0x3f, 0x92, 0x4c, 0x9a, 0xa4, 0x6f, 0xfa, 0x16, 0x69, 0xfa, 0xea, 0xe4, 0x45, + 0x8f, 0xaa, 0x62, 0xe1, 0x48, 0xb0, 0x40, 0x6c, 0x90, 0x9a, 0x54, 0x50, 0x4b, 0x05, 0x81, 0x8b, + 0xba, 0x60, 0x81, 0x35, 0x71, 0x26, 0xf6, 0x08, 0xc7, 0xb6, 0x3c, 0x93, 0xf2, 0xb1, 0x67, 0xdf, + 0x2d, 0xff, 0xa8, 0xcb, 0x2e, 0x59, 0x15, 0x48, 0xff, 0x08, 0x9a, 0x0f, 0xdb, 0x93, 0x84, 0x45, + 0x11, 0xbb, 0xcc, 0x3d, 0xe7, 0x9e, 0x7b, 0xe6, 0xce, 0xbd, 0x31, 0xf8, 0x97, 0xe1, 0x68, 0x8c, + 0xd3, 0x29, 0x89, 0x58, 0x9f, 0x32, 0xc4, 0x70, 0x9f, 0x7d, 0x4c, 0x30, 0xb5, 0x92, 0x34, 0x66, + 0x31, 0xdc, 0x2e, 0x50, 0x4b, 0xa0, 0xed, 0x7f, 0xfc, 0xd8, 0x8f, 0x05, 0xd8, 0xe7, 0xbf, 0x24, + 0xaf, 0xbd, 0xa7, 0xa9, 0xa0, 0x91, 0x47, 0x74, 0x91, 0xb6, 0x5e, 0x42, 0xc4, 0x17, 0xd0, 0xee, + 0x0a, 0x7a, 0x81, 0x42, 0x32, 0x46, 0x2c, 
0x4e, 0x15, 0x63, 0x7f, 0x85, 0x91, 0xa0, 0x14, 0x4d, + 0x33, 0x01, 0x53, 0x83, 0x2f, 0x70, 0x4a, 0x49, 0x1c, 0x2d, 0x14, 0xe8, 0xf8, 0x71, 0xec, 0x87, + 0xb8, 0x2f, 0x4e, 0xa3, 0xd9, 0xa4, 0xcf, 0xc8, 0x14, 0x53, 0x86, 0xa6, 0x89, 0x24, 0xf4, 0xde, + 0x82, 0xfa, 0xd1, 0x60, 0x68, 0x3b, 0x98, 0x26, 0x71, 0x44, 0x31, 0x85, 0xcf, 0x41, 0x63, 0x42, + 0x22, 0x14, 0x92, 0x4f, 0xd8, 0x1d, 0x85, 0xb1, 0xf7, 0xae, 0xf5, 0x57, 0xd7, 0x38, 0xac, 0x3d, + 0x38, 0xb0, 0xb4, 0x76, 0xf0, 0x6b, 0x5a, 0x59, 0xce, 0x53, 0x45, 0x1f, 0x70, 0xb6, 0x53, 0x9f, + 0xe8, 0xc7, 0xde, 0x67, 0x03, 0x34, 0xce, 0xb3, 0x3b, 0x51, 0x3b, 0x9a, 0xc4, 0x70, 0x08, 0xea, + 0xf9, 0x2d, 0x5d, 0x8a, 0x59, 0xcb, 0x10, 0x05, 0x4c, 0xbd, 0x80, 0xbc, 0x43, 0x9e, 0x78, 0x86, + 0x99, 0xb3, 0x75, 0xa1, 0x9d, 0xa0, 0x05, 0x76, 0x42, 0x44, 0x99, 0x1b, 0x60, 0xe2, 0x07, 0xcc, + 0xf5, 0x02, 0x14, 0xf9, 0x78, 0x2c, 0xbc, 0xae, 0x39, 0x7f, 0x73, 0xe8, 0x44, 0x20, 0x43, 0x09, + 0xf4, 0xbe, 0x18, 0x60, 0x67, 0xc8, 0xdd, 0x46, 0x74, 0x46, 0x5f, 0x8a, 0x16, 0x0a, 0x33, 0x0e, + 0xd8, 0xf6, 0xb2, 0xb0, 0x2b, 0x5b, 0xab, 0xfc, 0xfc, 0xb7, 0xea, 0x67, 0x49, 0x60, 0xb0, 0x7e, + 0x75, 0xd3, 0x29, 0x39, 0x4d, 0x6f, 0x31, 0xfc, 0xdb, 0xde, 0x02, 0x50, 0x3e, 0x97, 0x6f, 0x07, + 0x8f, 0x40, 0x35, 0x57, 0x53, 0x3e, 0xf6, 0x75, 0x1f, 0xea, 0x8d, 0x0b, 0x27, 0xca, 0x43, 0x91, + 0x05, 0xdb, 0xa0, 0x42, 0xe3, 0x09, 0x7b, 0x8f, 0x52, 0x2c, 0x4a, 0x56, 0x9d, 0xfc, 0xdc, 0xfb, + 0xb1, 0x09, 0x36, 0xce, 0xf8, 0x28, 0xc3, 0xc7, 0xa0, 0xac, 0xb4, 0x54, 0x99, 0x5d, 0x6b, 0x79, + 0xdc, 0x2d, 0x65, 0x4a, 0x95, 0xc8, 0xf8, 0xf0, 0x00, 0x54, 0xbc, 0x00, 0x91, 0xc8, 0x25, 0xf2, + 0x4e, 0xd5, 0x41, 0x6d, 0x7e, 0xd3, 0x29, 0x0f, 0x79, 0xcc, 0x3e, 0x76, 0xca, 0x02, 0xb4, 0xc7, + 0xf0, 0x1e, 0x68, 0x90, 0x88, 0x30, 0x82, 0x42, 0xd5, 0x89, 0x56, 0x43, 0x74, 0xa0, 0xae, 0xa2, + 0xb2, 0x09, 0xf0, 0x3e, 0x10, 0x2d, 0x91, 0xc3, 0x96, 0x31, 0xd7, 0x04, 0xb3, 0xc9, 0x01, 0x31, + 0x47, 0x8a, 0xeb, 0x80, 0xba, 0xc6, 0x25, 0xe3, 0xd6, 0xfa, 0xaa, 0x77, 0xf9, 0x54, 0x22, 0xcb, + 0x3e, 0x1e, 0xec, 0x70, 0xef, 0xf3, 0x9b, 0x4e, 0xed, 0x34, 0x93, 0xb2, 0x8f, 0x9d, 0x5a, 0xae, + 0x6b, 0x8f, 0xe1, 0x29, 0x68, 0x6a, 0x9a, 0x7c, 0x3f, 0x5a, 0x1b, 0x42, 0xb5, 0x6d, 0xc9, 0xe5, + 0xb1, 0xb2, 0xe5, 0xb1, 0x5e, 0x67, 0xcb, 0x33, 0xa8, 0x70, 0xd9, 0xcb, 0x6f, 0x1d, 0xc3, 0xa9, + 0xe7, 0x5a, 0x1c, 0x85, 0xcf, 0x40, 0x33, 0xc2, 0x1f, 0x98, 0x9b, 0x0f, 0x2b, 0x6d, 0x6d, 0xde, + 0x69, 0xbc, 0x1b, 0x3c, 0xad, 0xd8, 0x14, 0xf8, 0x04, 0x00, 0x4d, 0xa3, 0x7c, 0x27, 0x0d, 0x2d, + 0x83, 0x1b, 0x11, 0xd7, 0xd2, 0x44, 0x2a, 0x77, 0x33, 0xc2, 0xd3, 0x34, 0x23, 0x43, 0x60, 0xea, + 0xd3, 0x5c, 0xe8, 0xe5, 0x83, 0x5d, 0x15, 0x8f, 0xb5, 0x57, 0x0c, 0x76, 0x91, 0xad, 0x46, 0xfc, + 0x97, 0x6b, 0x06, 0xfe, 0x70, 0xcd, 0x5e, 0x80, 0xff, 0x17, 0xd6, 0x6c, 0x49, 0x3f, 0xb7, 0x57, + 0x13, 0xf6, 0xba, 0xda, 0xde, 0x2d, 0x0a, 0x65, 0x1e, 0xb3, 0x41, 0x4c, 0x31, 0x9d, 0x85, 0x8c, + 0xba, 0x01, 0xa2, 0x41, 0x6b, 0xab, 0x6b, 0x1c, 0x6e, 0xc9, 0x41, 0x74, 0x64, 0xfc, 0x04, 0xd1, + 0x00, 0xee, 0x82, 0x0a, 0x4a, 0x12, 0x49, 0xa9, 0x0b, 0x4a, 0x19, 0x25, 0x09, 0x87, 0x06, 0xaf, + 0xae, 0xe6, 0xa6, 0x71, 0x3d, 0x37, 0x8d, 0xef, 0x73, 0xd3, 0xb8, 0xbc, 0x35, 0x4b, 0xd7, 0xb7, + 0x66, 0xe9, 0xeb, 0xad, 0x59, 0x7a, 0xf3, 0xc8, 0x27, 0x2c, 0x98, 0x8d, 0x2c, 0x2f, 0x9e, 0xf6, + 0xf5, 0xbf, 0xf5, 0xe2, 0xa7, 0xfc, 0xb6, 0x2c, 0x7f, 0x95, 0x46, 0x9b, 0x22, 0xfe, 0xf0, 0x67, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0xe8, 0x4c, 0x4d, 0xb0, 0x06, 0x00, 0x00, } func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { @@ -493,21 +474,9 @@ func 
(m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BeginBlock != nil { + if m.FinalizeBlock != nil { { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -517,20 +486,6 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.DeliverTxs) > 0 { - for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } @@ -747,12 +702,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err10 != nil { - return 0, err10 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err9 != nil { + return 0, err9 } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x2a { @@ -807,18 +762,8 @@ func (m *ABCIResponses) Size() (n int) { } var l int _ = l - if len(m.DeliverTxs) > 0 { - for _, e := range m.DeliverTxs { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.BeginBlock != nil { - l = m.BeginBlock.Size() + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() n += 1 + l + sovTypes(uint64(l)) } return n @@ -957,79 +902,9 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeliverTxs = append(m.DeliverTxs, &types.ResponseDeliverTx{}) - if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EndBlock == nil { - m.EndBlock = 
&types.ResponseEndBlock{} - } - if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1056,10 +931,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeginBlock == nil { - m.BeginBlock = &types.ResponseBeginBlock{} + if m.FinalizeBlock == nil { + m.FinalizeBlock = &types.ResponseFinalizeBlock{} } - if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 919da91e5..35eab761d 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -15,9 +15,7 @@ import "google/protobuf/timestamp.proto"; // of the various ABCI calls during block processing. // It is persisted to disk for each height before calling Commit. message ABCIResponses { - repeated tendermint.abci.ResponseDeliverTx deliver_txs = 1; - tendermint.abci.ResponseEndBlock end_block = 2; - tendermint.abci.ResponseBeginBlock begin_block = 3; + tendermint.abci.ResponseFinalizeBlock finalize_block = 2; } // ValidatorsInfo represents the latest validator set, or the last height it changed diff --git a/proto/tendermint/types/canonical.proto b/proto/tendermint/types/canonical.proto index b7a66d4d2..58d8c44e9 100644 --- a/proto/tendermint/types/canonical.proto +++ b/proto/tendermint/types/canonical.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "google/protobuf/timestamp.proto"; @@ -34,5 +32,4 @@ message CanonicalVote { CanonicalBlockID block_id = 4 [(gogoproto.customname) = "BlockID"]; google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; string chain_id = 6 [(gogoproto.customname) = "ChainID"]; - VoteExtensionToSign vote_extension = 7; } diff --git a/proto/tendermint/types/events.proto b/proto/tendermint/types/events.proto index a1e5cc498..1ef715872 100644 --- a/proto/tendermint/types/events.proto +++ b/proto/tendermint/types/events.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - message EventDataRoundState { int64 height = 1; int32 round = 2; diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 18fe9a503..a87670c9f 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -71,7 +71,7 @@ message HashedParams { // SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid. // These parameters are part of the proposer-based timestamps algorithm. 
For more information, // see the specification of proposer-based timestamps: -// https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp +// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp message SynchronyParams { // message_delay bounds how long a proposal message may take to reach all validators on a newtork // and still be considered valid. diff --git a/rfc/README.md b/rfc/README.md deleted file mode 100644 index 7e4dfdd33..000000000 --- a/rfc/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Request for Comments (RFC) - -RFC stands for `Request for Comments`. It is a social device use to float and polish an idea prior to the inclusion into an existing or new spec/paper/research topic. - -An RFC should not be used for bug reports or trivial discussions - the overhead of compiling an RFC does not justify it. - -An RFC should not consist only of a problem statement (use a standard issue for that). - -A RFC should consist of: - -- Changelog -- Context on the relevant goals and the current state -- Proposed Solution -- Summary of pros and cons -- References - -If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. - -**Note the context/background should be written in the present tense.** - -Some RFC's will be presented at a Tendermint Dev Session. If you are an outside contributor and have submitted a RFC, you may be invited to present your RFC at one of these calls. - -## Table of Contents - -- [001-block-retention](./001-block-retention.md) -- [002-nonzero-genesis](./002-nonzero-genesis.md) -- [003-ed25519-verification](./003-ed25519-verification.md) -- [004-abci++](./004-abci++.md) -- [005-reverse-sync](./005-reverse-sync.md) -- [006-semantic-versioning](./006-semantic-versioning.md) diff --git a/rfc/rfc_template.md b/rfc/rfc_template.md deleted file mode 100644 index 817066ee4..000000000 --- a/rfc/rfc_template.md +++ /dev/null @@ -1,39 +0,0 @@ -# RFC {RFC-NUMBER}: {TITLE} - -## Changelog - -- {date}: {changelog} - -## Author(s) - -- {First Name} {github handle} - -## Context - -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. - -## Proposal - -> It should contain a detailed breakdown of how the problem should be resolved including diagrams and other supporting materials needed to present the case and implementation roadmap for the proposed changes. The reader should be able to fully understand the proposal. This section should be broken up using ## subsections as needed. - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later RFC changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. - -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
- -- {reference link} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index e26d499f1..1591862cf 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/rpc/coretypes" @@ -22,7 +23,7 @@ func TestHTTPSimple(t *testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf, err := rpctest.CreateConfig("ExampleHTTP_simple") + conf, err := rpctest.CreateConfig(t, "ExampleHTTP_simple") require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) @@ -72,7 +73,7 @@ func TestHTTPBatching(t *testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf, err := rpctest.CreateConfig("ExampleHTTP_batching") + conf, err := rpctest.CreateConfig(t, "ExampleHTTP_batching") require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index ad3043098..9bd52174b 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -2,12 +2,12 @@ package client_test import ( "context" - "fmt" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -20,13 +20,11 @@ func NodeSuite(t *testing.T, logger log.Logger) (service.Service, *config.Config ctx, cancel := context.WithCancel(context.Background()) - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) // start a tendermint node in the background to test against - dir, err := os.MkdirTemp("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) - require.NoError(t, err) - + dir := t.TempDir() app := kvstore.NewPersistentKVStoreApplication(logger, dir) node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 700b08f5e..1d04fa4cd 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -55,7 +55,8 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes if res.CheckTx.IsErr() { return &res, nil } - res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) + fb := a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) + res.DeliverTx = *fb.Txs[0] res.Height = -1 // TODO return &res, nil } @@ -64,7 +65,7 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes. c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() + go func() { a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }() } return &coretypes.ResultBroadcastTx{ Code: c.Code, @@ -79,7 +80,7 @@ func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... 
if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() + go func() { a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }() } return &coretypes.ResultBroadcastTx{ Code: c.Code, diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index f3b94693e..c4b1ddd8f 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -68,8 +68,7 @@ type ResultBlockResults struct { Height int64 `json:"height,string"` TxsResults []*abci.ResponseDeliverTx `json:"txs_results"` TotalGasUsed int64 `json:"total_gas_used,string"` - BeginBlockEvents []abci.Event `json:"begin_block_events"` - EndBlockEvents []abci.Event `json:"end_block_events"` + FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` ConsensusParamUpdates *tmproto.ConsensusParams `json:"consensus_param_updates"` } diff --git a/rpc/coretypes/responses_test.go b/rpc/coretypes/responses_test.go index a85f3f777..d4ced795a 100644 --- a/rpc/coretypes/responses_test.go +++ b/rpc/coretypes/responses_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index cf04e704d..3a626e43a 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -11,7 +11,6 @@ import ( "time" "github.com/gorilla/websocket" - metrics "github.com/rcrowley/go-metrics" "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" @@ -66,10 +65,9 @@ type WSClient struct { // nolint: maligned wg sync.WaitGroup - mtx sync.RWMutex - sentLastPingAt time.Time - reconnecting bool - nextReqID int + mtx sync.RWMutex + reconnecting bool + nextReqID int // sentIDs map[types.JSONRPCIntID]bool // IDs of the requests currently in flight // Time allowed to write a message to the server. 0 means block until operation succeeds. @@ -80,10 +78,6 @@ type WSClient struct { // nolint: maligned // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration - - // Time between sending a ping and receiving a pong. See - // https://godoc.org/github.com/rcrowley/go-metrics#Timer. - PingPongLatencyTimer metrics.Timer } // NewWS returns a new client with default options. 
The endpoint argument must @@ -117,8 +111,6 @@ func NewWS(remoteAddr, endpoint string) (*WSClient, error) { // sentIDs: make(map[types.JSONRPCIntID]bool), } - - c.PingPongLatencyTimer = metrics.NewTimer() return c, nil } @@ -165,7 +157,6 @@ func (c *WSClient) Start(ctx context.Context) error { func (c *WSClient) Stop() error { // only close user-facing channels when we can't write to them c.wg.Wait() - c.PingPongLatencyTimer.Stop() close(c.ResponsesCh) return nil } @@ -386,9 +377,6 @@ func (c *WSClient) writeRoutine(ctx context.Context) { c.reconnectAfter <- err return } - c.mtx.Lock() - c.sentLastPingAt = time.Now() - c.mtx.Unlock() case <-c.readRoutineQuit: return case <-ctx.Done(): @@ -411,16 +399,6 @@ func (c *WSClient) readRoutine(ctx context.Context) { c.wg.Done() }() - c.conn.SetPongHandler(func(string) error { - // gather latency stats - c.mtx.RLock() - t := c.sentLastPingAt - c.mtx.RUnlock() - c.PingPongLatencyTimer.UpdateSince(t) - - return nil - }) - for { // reset deadline for every message type (control or data) if c.readWait > 0 { diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index 37bd64b22..5bbb5fc25 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -11,20 +11,12 @@ import ( "github.com/fortytw2/leaktest" "github.com/gorilla/websocket" - metrics "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -func init() { - // Disable go-metrics metrics in tests, since they start unsupervised - // goroutines that trip the leak tester. Calling Stop on the metric is not - // sufficient, as that does not wait for the goroutine. - metrics.UseNilMetrics = true -} - const wsCallTimeout = 5 * time.Second type myTestHandler struct { diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 42f78c01f..f6c5217bf 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "testing" "time" abciclient "github.com/tendermint/tendermint/abci/client" @@ -57,8 +58,8 @@ func makeAddrs() (p2pAddr, rpcAddr string) { return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) (*config.Config, error) { - c, err := config.ResetTestRoot(testName) +func CreateConfig(t *testing.T, testName string) (*config.Config, error) { + c, err := config.ResetTestRoot(t.TempDir(), testName) if err != nil { return nil, err } diff --git a/rust-spec/fastsync/README.md b/rust-spec/fastsync/README.md deleted file mode 100644 index 0f96d6a90..000000000 --- a/rust-spec/fastsync/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Fast Sync - -Deprecated see [tendermint/docs/tendermint-core/block-sync](https://github.com/tendermint/tendermint/blob/master/docs/tendermint-core/block-sync/README.md) diff --git a/rust-spec/lightclient/README.md b/rust-spec/lightclient/README.md deleted file mode 100644 index b9fcce4d8..000000000 --- a/rust-spec/lightclient/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Light Clients - -Deprecated see [spec/light-client](../../spec/light-client) diff --git a/rust-spec/lightclient/verification/README.md b/rust-spec/lightclient/verification/README.md deleted file mode 100644 index bc5deb2db..000000000 --- a/rust-spec/lightclient/verification/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Verification - -Deprecated see [spec/light-client/verification](../../../spec/light-client/verification/README.md) diff 
--git a/spec/README.md b/spec/README.md index dfb722ef9..037180e29 100644 --- a/spec/README.md +++ b/spec/README.md @@ -55,6 +55,10 @@ please submit them to our [bug bounty](https://tendermint.com/security)! - [Write-Ahead Log](./consensus/wal.md): Details about how the consensus engine preserves data and recovers from crash failures +### Ivy Proofs + +- [Ivy Proofs](./ivy-proofs/README.md) + ## Overview Tendermint provides Byzantine Fault Tolerant State Machine Replication using diff --git a/spec/abci++/README.md b/spec/abci++/README.md index 5aecc06cf..b8b75f46b 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -21,7 +21,7 @@ Thus, Tendermint always sends the `Request*` messages and receives the `Response in return. All ABCI++ messages and methods are defined in -[protocol buffers](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto). +[protocol buffers](https://github.com/tendermint/tendermint/blob/master/proto/spec/abci/types.proto). This allows Tendermint to run with applications written in many programming languages. This specification is split as follows: diff --git a/spec/blockchain/blockchain.md b/spec/blockchain/blockchain.md deleted file mode 100644 index fcc080ee7..000000000 --- a/spec/blockchain/blockchain.md +++ /dev/null @@ -1,3 +0,0 @@ -# Blockchain - -Deprecated see [core/data_structures.md](../core/data_structures.md) diff --git a/spec/blockchain/encoding.md b/spec/blockchain/encoding.md deleted file mode 100644 index aa2c9ab3f..000000000 --- a/spec/blockchain/encoding.md +++ /dev/null @@ -1,3 +0,0 @@ -# Encoding - -Deprecated see [core/data_structures.md](../core/encoding.md) diff --git a/spec/blockchain/readme.md b/spec/blockchain/readme.md deleted file mode 100644 index 10ad46690..000000000 --- a/spec/blockchain/readme.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -order: 1 -parent: - title: Blockchain - order: false ---- - -# Blockchain - -This section describes the core types and functionality of the Tendermint protocol implementation. - -[Core Data Structures](../core/data_structures.md) -[Encoding](../core/encoding.md) -[State](../core/state.md) diff --git a/spec/blockchain/state.md b/spec/blockchain/state.md deleted file mode 100644 index f4f1d9525..000000000 --- a/spec/blockchain/state.md +++ /dev/null @@ -1,3 +0,0 @@ -# State - -Deprecated see [core/state.md](../core/state.md) diff --git a/spec/consensus/proposer-based-timestamp/README.md b/spec/consensus/proposer-based-timestamp/README.md index 6f52099c2..8e3acf9d6 100644 --- a/spec/consensus/proposer-based-timestamp/README.md +++ b/spec/consensus/proposer-based-timestamp/README.md @@ -146,7 +146,7 @@ The full solution is detailed and formalized in the [Protocol Specification][alg [proposertla]: ./tla/TendermintPBT_001_draft.tla -[bfttime]: https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md +[bfttime]: https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md [arXiv]: https://arxiv.org/pdf/1807.04938.pdf [issue353]: https://github.com/tendermint/spec/issues/353 diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md b/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md index 8e60c0138..f2ec14036 100644 --- a/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md +++ b/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md @@ -144,5 +144,5 @@ Back to [main document][main]. 
 [sysmodel]: ./pbts-sysmodel_002_draft.md
-[bfttime]: https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md
+[bfttime]: https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md
 [arXiv]: https://arxiv.org/pdf/1807.04938.pdf
diff --git a/ivy-proofs/Dockerfile b/spec/ivy-proofs/Dockerfile
similarity index 100%
rename from ivy-proofs/Dockerfile
rename to spec/ivy-proofs/Dockerfile
diff --git a/ivy-proofs/README.md b/spec/ivy-proofs/README.md
similarity index 100%
rename from ivy-proofs/README.md
rename to spec/ivy-proofs/README.md
diff --git a/ivy-proofs/abstract_tendermint.ivy b/spec/ivy-proofs/abstract_tendermint.ivy
similarity index 100%
rename from ivy-proofs/abstract_tendermint.ivy
rename to spec/ivy-proofs/abstract_tendermint.ivy
diff --git a/ivy-proofs/accountable_safety_1.ivy b/spec/ivy-proofs/accountable_safety_1.ivy
similarity index 100%
rename from ivy-proofs/accountable_safety_1.ivy
rename to spec/ivy-proofs/accountable_safety_1.ivy
diff --git a/ivy-proofs/accountable_safety_2.ivy b/spec/ivy-proofs/accountable_safety_2.ivy
similarity index 100%
rename from ivy-proofs/accountable_safety_2.ivy
rename to spec/ivy-proofs/accountable_safety_2.ivy
diff --git a/ivy-proofs/check_proofs.sh b/spec/ivy-proofs/check_proofs.sh
similarity index 100%
rename from ivy-proofs/check_proofs.sh
rename to spec/ivy-proofs/check_proofs.sh
diff --git a/ivy-proofs/classic_safety.ivy b/spec/ivy-proofs/classic_safety.ivy
similarity index 100%
rename from ivy-proofs/classic_safety.ivy
rename to spec/ivy-proofs/classic_safety.ivy
diff --git a/ivy-proofs/count_lines.sh b/spec/ivy-proofs/count_lines.sh
similarity index 100%
rename from ivy-proofs/count_lines.sh
rename to spec/ivy-proofs/count_lines.sh
diff --git a/ivy-proofs/docker-compose.yml b/spec/ivy-proofs/docker-compose.yml
similarity index 99%
rename from ivy-proofs/docker-compose.yml
rename to spec/ivy-proofs/docker-compose.yml
index 1d4a8ffe1..e0612d4b1 100644
--- a/ivy-proofs/docker-compose.yml
+++ b/spec/ivy-proofs/docker-compose.yml
@@ -5,4 +5,3 @@ services:
     volumes:
       - ./:/home/user/tendermint-proof:ro
       - ./output:/home/user/tendermint-proof/output:rw
-
diff --git a/ivy-proofs/domain_model.ivy b/spec/ivy-proofs/domain_model.ivy
similarity index 100%
rename from ivy-proofs/domain_model.ivy
rename to spec/ivy-proofs/domain_model.ivy
diff --git a/ivy-proofs/network_shim.ivy b/spec/ivy-proofs/network_shim.ivy
similarity index 100%
rename from ivy-proofs/network_shim.ivy
rename to spec/ivy-proofs/network_shim.ivy
diff --git a/ivy-proofs/output/.gitignore b/spec/ivy-proofs/output/.gitignore
similarity index 100%
rename from ivy-proofs/output/.gitignore
rename to spec/ivy-proofs/output/.gitignore
diff --git a/ivy-proofs/tendermint.ivy b/spec/ivy-proofs/tendermint.ivy
similarity index 100%
rename from ivy-proofs/tendermint.ivy
rename to spec/ivy-proofs/tendermint.ivy
diff --git a/ivy-proofs/tendermint_test.ivy b/spec/ivy-proofs/tendermint_test.ivy
similarity index 100%
rename from ivy-proofs/tendermint_test.ivy
rename to spec/ivy-proofs/tendermint_test.ivy
diff --git a/spec/light-client/supervisor/supervisor_002_draft.md b/spec/light-client/supervisor/supervisor_002_draft.md
index 4300b8044..691196ac5 100644
--- a/spec/light-client/supervisor/supervisor_002_draft.md
+++ b/spec/light-client/supervisor/supervisor_002_draft.md
@@ -45,8 +45,8 @@ able to verify anything.
 Cross-checking this trusted block with providers upon initialization is helpful
 for ensuring that the node is responsive and correctly configured but does not
 increase trust since proving a conflicting block is a
-[light client attack](https://github.com/tendermint/spec/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-lc-attack1)
-and not just a [bogus](https://github.com/tendermint/spec/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-bogus1) block could result in
+[light client attack](https://github.com/tendermint/tendermint/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-lc-attack1)
+and not just a [bogus](https://github.com/tendermint/tendermint/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-bogus1) block could result in
 performing backwards verification beyond the trusted period, thus a fruitless
 endeavour.
diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/messages/consensus.md
index 19c103750..7a56231e6 100644
--- a/spec/p2p/messages/consensus.md
+++ b/spec/p2p/messages/consensus.md
@@ -30,7 +30,7 @@ next block in the blockchain should be.
 
 Vote is sent to vote for some block (or to inform others that a process does not
 vote in the current round). Vote is defined in the
-[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockidd)
+[Blockchain](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#blockidd)
 section and contains validator's information (validator address and index),
 height and round for which the vote is sent, vote type, blockID if process vote
 for some block (`nil` otherwise) and a timestamp when the vote is sent. The
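The consensus.md hunk above lists the fields a Vote message carries. As a quick illustration, here is a Go sketch of that shape; field names follow `types.Vote` in this repository, but the block below is illustrative only and not part of the patch:

```go
package sketch

import "time"

// VoteSketch lists the information the Vote message described above carries:
// who voted, for which height/round, for which block (if any), and when.
type VoteSketch struct {
	Type             int8      // prevote or precommit
	Height           int64     // height the vote is for
	Round            int32     // round the vote is for
	BlockID          []byte    // identifier of the voted block; empty when voting nil
	Timestamp        time.Time // when the vote was sent
	ValidatorAddress []byte    // address of the signing validator
	ValidatorIndex   int32     // index of the validator in the validator set
	Signature        []byte
}
```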
diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go
index ad840b6a1..a7348ecd9 100644
--- a/test/e2e/app/app.go
+++ b/test/e2e/app/app.go
@@ -145,24 +145,27 @@ func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
 	return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
 }
 
-// DeliverTx implements ABCI.
-func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
-	key, value, err := parseTx(req.Tx)
-	if err != nil {
-		panic(err) // shouldn't happen since we verified it in CheckTx
+// FinalizeBlock implements ABCI.
+func (app *Application) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
+	var txs = make([]*abci.ResponseDeliverTx, len(req.Txs))
+
+	for i, tx := range req.Txs {
+		key, value, err := parseTx(tx)
+		if err != nil {
+			panic(err) // shouldn't happen since we verified it in CheckTx
+		}
+		app.state.Set(key, value)
+
+		txs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
 	}
-	app.state.Set(key, value)
-	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
-}
 
-// EndBlock implements ABCI.
-func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
 	valUpdates, err := app.validatorUpdates(uint64(req.Height))
 	if err != nil {
 		panic(err)
 	}
 
-	return abci.ResponseEndBlock{
+	return abci.ResponseFinalizeBlock{
+		Txs:              txs,
 		ValidatorUpdates: valUpdates,
 		Events: []abci.Event{
 			{
diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go
index 0e0e66baa..74233fe66 100644
--- a/test/e2e/generator/generate_test.go
+++ b/test/e2e/generator/generate_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
 	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
 )
diff --git a/test/fuzz/mempool/fuzz_test.go b/test/fuzz/mempool/fuzz_test.go
index 8af0326dd..69f34db64 100644
--- a/test/fuzz/mempool/fuzz_test.go
+++ b/test/fuzz/mempool/fuzz_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
 	mempool "github.com/tendermint/tendermint/test/fuzz/mempool"
 )
diff --git a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go
index 1f3757aa0..6fe19b03b 100644
--- a/test/fuzz/p2p/secretconnection/fuzz_test.go
+++ b/test/fuzz/p2p/secretconnection/fuzz_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
 	"github.com/tendermint/tendermint/test/fuzz/p2p/secretconnection"
 )
diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
index 41911e725..8a34da8a6 100644
--- a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
+++ b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
 	"github.com/tendermint/tendermint/test/fuzz/rpc/jsonrpc/server"
 )
diff --git a/types/block.go b/types/block.go
index a5e9d05d5..d6e45af6a 100644
--- a/types/block.go
+++ b/types/block.go
@@ -329,7 +329,7 @@ func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence)
 // NOTE: changes to the Header should be duplicated in:
 // - header.Hash()
 // - abci.Header
-// - https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md
+// - https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md
 type Header struct {
 	// basic block info
 	Version version.Consensus `json:"version"`
@@ -518,7 +518,8 @@ func (h *Header) StringIndented(indent string) string {
 		indent, h.LastResultsHash,
 		indent, h.EvidenceHash,
 		indent, h.ProposerAddress,
-		indent, h.Hash())
+		indent, h.Hash(),
+	)
 }
 
 // ToProto converts Header to protobuf
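The e2e app hunk above folds the per-transaction DeliverTx calls and the trailing EndBlock hook into a single FinalizeBlock call over the whole decided block. The following self-contained sketch shows the same pattern with local stand-in types (the real request/response definitions live in the abci package and carry more fields); it is an illustration of the shape of the change, not the e2e app itself:

```go
package main

import (
	"fmt"
	"strings"
)

// Stand-ins for the ABCI request/response shapes used in the hunk above.
type requestFinalizeBlock struct{ Txs [][]byte }
type deliverTxResult struct{ Code uint32 }
type responseFinalizeBlock struct{ Txs []*deliverTxResult }

// finalizeBlock applies every transaction of the decided block in one call
// and records one result per transaction, replacing the per-tx DeliverTx
// round-trips plus the separate EndBlock call.
func finalizeBlock(state map[string]string, req requestFinalizeBlock) responseFinalizeBlock {
	results := make([]*deliverTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		key, value, ok := strings.Cut(string(tx), "=")
		if !ok {
			// CheckTx is expected to have filtered malformed transactions already.
			results[i] = &deliverTxResult{Code: 1}
			continue
		}
		state[key] = value
		results[i] = &deliverTxResult{Code: 0}
	}
	return responseFinalizeBlock{Txs: results}
}

func main() {
	state := map[string]string{}
	resp := finalizeBlock(state, requestFinalizeBlock{Txs: [][]byte{[]byte("a=1"), []byte("b=2")}})
	fmt.Println(len(resp.Txs), state) // 2 map[a:1 b:2]
}
```

The events.go diff that follows makes the corresponding change on the event side: block events now carry a single `ResultFinalizeBlock` instead of separate begin/end block results.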
diff --git a/types/events.go b/types/events.go
index bf9d74f53..3a3f64fd0 100644
--- a/types/events.go
+++ b/types/events.go
@@ -114,8 +114,7 @@ type EventDataNewBlock struct {
 	Block   *Block  `json:"block"`
 	BlockID BlockID `json:"block_id"`
 
-	ResultBeginBlock abci.ResponseBeginBlock `json:"result_begin_block"`
-	ResultEndBlock   abci.ResponseEndBlock   `json:"result_end_block"`
+	ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"`
 }
 
 // TypeTag implements the required method of jsontypes.Tagged.
@@ -124,9 +123,8 @@ func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" }
 type EventDataNewBlockHeader struct {
 	Header Header `json:"header"`
 
-	NumTxs           int64                   `json:"num_txs,string"` // Number of txs in a block
-	ResultBeginBlock abci.ResponseBeginBlock `json:"result_begin_block"`
-	ResultEndBlock   abci.ResponseEndBlock   `json:"result_end_block"`
+	NumTxs              int64                      `json:"num_txs,string"` // Number of txs in a block
+	ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"`
 }
 
 // TypeTag implements the required method of jsontypes.Tagged.
diff --git a/types/genesis_test.go b/types/genesis_test.go
index 1045b7065..99227ad3b 100644
--- a/types/genesis_test.go
+++ b/types/genesis_test.go
@@ -126,7 +126,7 @@ func TestBasicGenesisDoc(t *testing.T) {
 }
 
 func TestGenesisSaveAs(t *testing.T) {
-	tmpfile, err := os.CreateTemp("", "genesis")
+	tmpfile, err := os.CreateTemp(t.TempDir(), "genesis")
 	require.NoError(t, err)
 
 	defer os.Remove(tmpfile.Name())
diff --git a/types/node_info.go b/types/node_info.go
index 57aced054..fd47816e2 100644
--- a/types/node_info.go
+++ b/types/node_info.go
@@ -82,10 +82,10 @@ func (info NodeInfo) Validate() error {
 	}
 
 	// Validate Version
-	if len(info.Version) > 0 &&
-		(!tmstrings.IsASCIIText(info.Version) || tmstrings.ASCIITrim(info.Version) == "") {
-
-		return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version)
+	if len(info.Version) > 0 {
+		if ver, err := tmstrings.ASCIITrim(info.Version); err != nil || ver == "" {
+			return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got, %q [%s]", info.Version, ver)
+		}
 	}
 
 	// Validate Channels - ensure max and check for duplicates.
@@ -101,8 +101,7 @@ func (info NodeInfo) Validate() error {
 		channels[ch] = struct{}{}
 	}
 
-	// Validate Moniker.
-	if !tmstrings.IsASCIIText(info.Moniker) || tmstrings.ASCIITrim(info.Moniker) == "" {
+	if m, err := tmstrings.ASCIITrim(info.Moniker); err != nil || m == "" {
 		return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker)
 	}
 
@@ -116,8 +115,10 @@ func (info NodeInfo) Validate() error {
 	}
 
 	// XXX: Should we be more strict about address formats?
 	rpcAddr := other.RPCAddress
-	if len(rpcAddr) > 0 && (!tmstrings.IsASCIIText(rpcAddr) || tmstrings.ASCIITrim(rpcAddr) == "") {
-		return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr)
+	if len(rpcAddr) > 0 {
+		if a, err := tmstrings.ASCIITrim(rpcAddr); err != nil || a == "" {
+			return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr)
+		}
 	}
 
 	return nil
diff --git a/types/node_info_test.go b/types/node_info_test.go
index c14570c96..110c67fc3 100644
--- a/types/node_info_test.go
+++ b/types/node_info_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/version"
@@ -80,15 +81,18 @@ func TestNodeInfoValidate(t *testing.T) {
 	assert.NoError(t, ni.Validate())
 
 	for _, tc := range testCases {
-		ni := testNodeInfo(t, nodeKeyID, name)
-		ni.Channels = channels
-		tc.malleateNodeInfo(&ni)
-		err := ni.Validate()
-		if tc.expectErr {
-			assert.Error(t, err, tc.testName)
-		} else {
-			assert.NoError(t, err, tc.testName)
-		}
+		t.Run(tc.testName, func(t *testing.T) {
+			ni := testNodeInfo(t, nodeKeyID, name)
+			ni.Channels = channels
+			tc.malleateNodeInfo(&ni)
+			err := ni.Validate()
+			if tc.expectErr {
+				assert.Error(t, err, tc.testName)
+			} else {
+				assert.NoError(t, err, tc.testName)
+			}
+		})
+	}
 }
diff --git a/types/params.go b/types/params.go
index eaa91195f..017ac5d15 100644
--- a/types/params.go
+++ b/types/params.go
@@ -79,7 +79,7 @@ type VersionParams struct {
 // SynchronyParams influence the validity of block timestamps.
 // For more information on the relationship of the synchrony parameters to
 // block validity, see the Proposer-Based Timestamps specification:
-// https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/README.md
+// https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md
 type SynchronyParams struct {
 	Precision    time.Duration `json:"precision,string"`
 	MessageDelay time.Duration `json:"message_delay,string"`
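The proposal.go hunk that follows documents the proposer-based-timestamp rule `localtime <= proposedBlockTime + MsgDelay + Precision`. Below is a rough, self-contained sketch of that bound check using the `SynchronyParams` fields shown above; the round-based scaling of `MessageDelay` mentioned in the code comments is left out, and the names here are illustrative rather than the actual implementation:

```go
package main

import (
	"fmt"
	"time"
)

// synchronyParams mirrors the two fields of SynchronyParams shown above.
type synchronyParams struct {
	Precision    time.Duration
	MessageDelay time.Duration
}

// isTimelySketch checks the window described in the proposer-based-timestamp
// spec: a proposal received at recvTime is timely if it arrives no earlier
// than proposedTime-Precision and no later than
// proposedTime+MessageDelay+Precision. The real IsTimely additionally scales
// MessageDelay as rounds progress.
func isTimelySketch(recvTime, proposedTime time.Time, sp synchronyParams) bool {
	lower := proposedTime.Add(-sp.Precision)
	upper := proposedTime.Add(sp.MessageDelay).Add(sp.Precision)
	return !recvTime.Before(lower) && !recvTime.After(upper)
}

func main() {
	sp := synchronyParams{Precision: 500 * time.Millisecond, MessageDelay: 2 * time.Second}
	proposed := time.Now()
	fmt.Println(isTimelySketch(proposed.Add(time.Second), proposed, sp)) // true: within the window
}
```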
diff --git a/types/proposal.go b/types/proposal.go
index a4009eea2..818bb5c8e 100644
--- a/types/proposal.go
+++ b/types/proposal.go
@@ -89,7 +89,7 @@ func (p *Proposal) ValidateBasic() error {
 // localtime <= proposedBlockTime + MsgDelay + Precision
 //
 // For more information on the meaning of 'timely', see the proposer-based timestamp specification:
-// https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp
+// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp
 func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams, round int32) bool {
 	// The message delay values are scaled as rounds progress.
 	// Every 10 rounds, the message delay is doubled to allow consensus to
diff --git a/types/validator_test.go b/types/validator_test.go
index 1e29787fd..b19317453 100644
--- a/types/validator_test.go
+++ b/types/validator_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
 	"github.com/tendermint/tendermint/crypto"
 )
diff --git a/types/vote.go b/types/vote.go
index 0a3ff2bef..7333f98fc 100644
--- a/types/vote.go
+++ b/types/vote.go
@@ -326,6 +326,22 @@ func (vote *Vote) ToProto() *tmproto.Vote {
 	}
 }
 
+func VotesToProto(votes []*Vote) []*tmproto.Vote {
+	if votes == nil {
+		return nil
+	}
+
+	res := make([]*tmproto.Vote, 0, len(votes))
+	for _, vote := range votes {
+		v := vote.ToProto()
+		// protobuf crashes when serializing "repeated" fields with nil elements
+		if v != nil {
+			res = append(res, v)
+		}
+	}
+	return res
+}
+
 func VoteExtensionFromProto(pext *tmproto.VoteExtension) VoteExtension {
 	ext := VoteExtension{}
 	if pext != nil {
diff --git a/types/vote_set.go b/types/vote_set.go
index 03c287666..bb675e110 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -226,6 +226,10 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok
 	return nil, false
 }
 
+func (voteSet *VoteSet) GetVotes() []*Vote {
+	return voteSet.votes
+}
+
 // Assumes signature is valid.
 // If conflicting vote exists, returns it.
 func (voteSet *VoteSet) addVerifiedVote(