
Merge branch 'master' into wb/abci-prepare-proposal-synchronize

pull/8094/head
William Banfield, 3 years ago (committed by GitHub)
commit 6da708049f

47 changed files with 565 additions and 536 deletions
1. .github/workflows/markdown-links.yml (+18 -16)
2. .github/workflows/proto-check.yml (+0 -24)
3. .github/workflows/proto-dockerfile.yml (+0 -64)
4. .github/workflows/proto-lint.yml (+21 -0)
5. CONTRIBUTING.md (+25 -3)
6. Makefile (+35 -27)
7. README.md (+1 -0)
8. buf.gen.yaml (+7 -12)
9. buf.work.yaml (+3 -0)
10. docs/nodes/configuration.md (+1 -1)
11. docs/rfc/rfc-015-abci++-tx-mutation.md (+261 -0)
12. go.mod (+1 -1)
13. go.sum (+2 -1)
14. internal/consensus/common_test.go (+7 -2)
15. internal/consensus/reactor_test.go (+0 -3)
16. internal/consensus/state.go (+25 -12)
17. internal/libs/autofile/autofile.go (+5 -5)
18. internal/p2p/pex/reactor.go (+93 -134)
19. internal/p2p/pex/reactor_test.go (+1 -1)
20. node/node.go (+2 -1)
21. node/setup.go (+5 -4)
22. proto/Dockerfile (+0 -20)
23. proto/buf.lock (+7 -0)
24. proto/buf.yaml (+6 -11)
25. proto/tendermint/blocksync/types.proto (+2 -0)
26. proto/tendermint/consensus/types.proto (+2 -0)
27. proto/tendermint/consensus/wal.proto (+2 -0)
28. proto/tendermint/crypto/keys.proto (+2 -0)
29. proto/tendermint/crypto/proof.proto (+2 -0)
30. proto/tendermint/libs/bits/types.proto (+2 -0)
31. proto/tendermint/mempool/types.proto (+2 -0)
32. proto/tendermint/p2p/conn.proto (+2 -0)
33. proto/tendermint/p2p/pex.proto (+2 -0)
34. proto/tendermint/p2p/types.proto (+2 -0)
35. proto/tendermint/statesync/types.proto (+2 -0)
36. proto/tendermint/types/block.proto (+2 -0)
37. proto/tendermint/types/canonical.proto (+2 -0)
38. proto/tendermint/types/events.proto (+2 -0)
39. proto/tendermint/types/evidence.proto (+2 -0)
40. proto/tendermint/types/params.proto (+2 -0)
41. proto/tendermint/types/types.proto (+1 -1)
42. proto/tendermint/types/validator.proto (+2 -0)
43. proto/tendermint/version/types.proto (+2 -0)
44. scripts/protocgen.sh (+0 -44)
45. scripts/protopackage.sh (+1 -1)
46. spec/abci++/abci++_basic_concepts_002_draft.md (+1 -1)
47. third_party/proto/gogoproto/gogo.proto (+0 -147)

.github/workflows/markdown-links.yml (+18 -16)

@@ -1,17 +1,19 @@
name: Check Markdown links
# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
on:
push:
branches:
- master
pull_request:
branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
check-modified-files-only: 'yes'
#name: Check Markdown links
#
#on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
#
#jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
# with:
# check-modified-files-only: 'yes'

.github/workflows/proto-check.yml (+0 -24)

@@ -1,24 +0,0 @@
name: Proto Check
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a file in the proto directory
# has been modified.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "proto/*"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci

.github/workflows/proto-dockerfile.yml (+0 -64)

@@ -1,64 +0,0 @@
# This workflow (re)builds and pushes a Docker image containing the
# protobuf build tools used by the other workflows.
#
# When making changes that require updates to the builder image, you
# should merge the updates first and wait for this workflow to complete,
# so that the changes will be available for the dependent workflows.
#
name: Build & Push Proto Builder Image
on:
pull_request:
paths:
- "proto/*"
push:
branches:
- master
paths:
- "proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "0 9 1 * *"
env:
REGISTRY: ghcr.io
IMAGE_NAME: tendermint/docker-build-proto
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Check out and assign tags
id: prep
run: |
DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
VERSION=noop
if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then
VERSION="${GITHUB_REF#refs/tags/}"
elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then
VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::"${TAGS}"
- name: Set up docker buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Log in to the container registry
uses: docker/login-action@v1.14.1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v2.9.0
with:
context: ./proto
file: ./proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}

.github/workflows/proto-lint.yml (+21 -0)

@@ -0,0 +1,21 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- master
paths:
- 'proto/**'
jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.1.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

CONTRIBUTING.md (+25 -3)

@@ -105,11 +105,33 @@ specify exactly the dependency you want to update, eg.
## Protobuf
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
across Tendermint Core.
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
To generate proto stubs, lint, and check protos for breaking changes, you will
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
of the repository, run:
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
```bash
# Lint all of the .proto files in proto/tendermint
make proto-lint
# Check if any of your local changes (prior to committing to the Git repository)
# are breaking
make proto-check-breaking
# Generate Go code from the .proto files in proto/tendermint
make proto-gen
```
To automatically format `.proto` files, you will need
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
installed, you can run:
```bash
make proto-format
```
### Visual Studio Code


Makefile (+35 -27)

@@ -13,8 +13,6 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
CGO_ENABLED ?= 0
# handle nostrip
@@ -73,47 +71,57 @@ install:
$(BUILDDIR)/:
mkdir -p $@
# The Docker image containing the generator, formatter, and linter.
# This is generated by proto/Dockerfile. To update tools, make changes
# there and run the Build & Push Proto Builder Image workflow.
IMAGE := ghcr.io/tendermint/docker-build-proto:latest
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
HTTPS_GIT := https://github.com/tendermint/tendermint.git
###############################################################################
### Protobuf ###
###############################################################################
proto-all: proto-lint proto-check-breaking
.PHONY: proto-all
check-proto-deps:
ifeq (,$(shell which buf))
$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
endif
ifeq (,$(shell which protoc-gen-gogofaster))
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
endif
.PHONY: check-proto-deps
check-proto-format-deps:
ifeq (,$(shell which clang-format))
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps
proto-gen:
proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
@buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
proto-lint:
@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@buf lint
.PHONY: proto-lint
proto-format:
proto-format: check-proto-format-deps
@echo "Formatting Protobuf files"
@$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking:
@$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml
proto-check-breaking: check-proto-deps
@echo "Checking for breaking changes in Protobuf files against local branch"
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@buf breaking --against ".git"
.PHONY: proto-check-breaking
proto-check-breaking-ci:
@$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml
.PHONY: proto-check-breaking-ci
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
###############################################################################
### Build ABCI ###


README.md (+1 -0)

@@ -127,6 +127,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
- [Terra](https://www.terra.money/)
- [Celestia](https://celestia.org/)
- [Anoma](https://anoma.network/)
- [Vocdoni](https://docs.vocdoni.io/)
### Research


buf.gen.yaml (+7 -12)

@@ -1,14 +1,9 @@
# The version of the generation template (required).
# The only currently-valid value is v1beta1.
version: v1beta1
# The plugins to run.
version: v1
plugins:
# The name of the plugin.
- name: gogofaster
# The directory where the generated proto output will be written.
# The directory is relative to where the generation tool was run.
out: proto
# Set options to assign import paths to the well-known types
# and to enable service generation.
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
out: ./proto/
opt:
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
- plugins=grpc
- paths=source_relative

buf.work.yaml (+3 -0)

@@ -0,0 +1,3 @@
version: v1
directories:
- proto

docs/nodes/configuration.md (+1 -1)

@@ -594,7 +594,7 @@ This section will cover settings within the p2p section of the `config.toml`.
- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config paramters being deprecated and/or replaced.
Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
We will cover the new and deprecated parameters below.
### New Parameters


docs/rfc/rfc-015-abci++-tx-mutation.md (+261 -0)

@@ -0,0 +1,261 @@
# RFC 015: ABCI++ TX Mutation
## Changelog
- 23-Feb-2022: Initial draft (@williambanfield).
- 28-Feb-2022: Revised draft (@williambanfield).
## Abstract
A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions
in the proposed block. This scheme required the proposer to construct new transactions
and mark these new transactions as replacing other removed transactions. The specification
was ambiguous as to how the replacement may be communicated to peer nodes.
This RFC discusses issues with this mechanism and possible solutions.
## Background
### What is the proposed change?
A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing
transactions in a proposed block. To replace a transaction, the application running
`ProcessProposal` could mark a transaction as replaced by other application-supplied
transactions by returning a new transaction marked with the `ADDED` flag and setting
the `new_hashes` field of the removed transaction to contain the list of transaction hashes
that replace it. In that previous specification for ABCI++, the full use of the
`new_hashes` field is left somewhat ambiguous. At present, these hashes are not
gossiped and are not eventually included in the block to signal replacement to
other nodes. The specification did indicate that the transactions specified in
the `new_hashes` field will be removed from the mempool but it's not clear how
peer nodes will learn about them.
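As a point of reference, the following Go sketch reconstructs the rough shape of that
earlier proposal from the prose above. The `TxRecord` and `TxAction` names are
approximations of the withdrawn draft, not current ABCI++ types:
```go
package abcisketch

// TxAction approximates the per-transaction action flag from the withdrawn
// draft of the ABCI++ specification.
type TxAction int

const (
	TxActionUnknown    TxAction = iota
	TxActionUnmodified          // transaction passed through unchanged
	TxActionAdded               // transaction constructed and inserted by the application
	TxActionRemoved             // transaction dropped from the proposal
)

// TxRecord pairs a transaction with the action taken on it. Under the draft,
// a removed record could set NewHashes to the hashes of the ADDED
// transactions replacing it; how peers would learn of this mapping is the
// ambiguity discussed in this RFC.
type TxRecord struct {
	Action    TxAction
	Tx        []byte
	NewHashes [][]byte
}
```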
### What systems would be affected by adding transaction replacement?
The 'transaction' is a central building block of a Tendermint blockchain, so adding
a mechanism for transaction replacement would require changes to many aspects of Tendermint.
The following is a rough list of the functionality that this mechanism would affect:
#### Transaction indexing
Tendermint's indexer stores transactions and transaction results using the hash of the executed
transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value.
To allow transaction replacement, the replaced transactions would need to be stored as well in the
indexer, likely as a mapping of original transaction to list of transaction hashes that replaced
the original transaction.
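As a sketch of what such a mapping might look like, consider a minimal in-memory
version. The names here (`replacementIndex`, `record`, `lookup`) are hypothetical;
Tendermint's actual indexer persists its entries in a key-value store:
```go
package indexsketch

import "crypto/sha256"

// replacementIndex maps the hash of a replaced (original) transaction to the
// hash of the committed transaction that replaced it.
type replacementIndex struct {
	replacedBy map[[sha256.Size]byte][sha256.Size]byte
}

func newReplacementIndex() *replacementIndex {
	return &replacementIndex{
		replacedBy: make(map[[sha256.Size]byte][sha256.Size]byte),
	}
}

// record stores one replacement: every transaction in replaced now points at
// the included (committed) transaction.
func (ri *replacementIndex) record(included []byte, replaced [][]byte) {
	incKey := sha256.Sum256(included)
	for _, tx := range replaced {
		ri.replacedBy[sha256.Sum256(tx)] = incKey
	}
}

// lookup resolves a queried hash to the hash that should be consulted in the
// primary transaction index, reporting whether a replacement occurred.
func (ri *replacementIndex) lookup(txHash [sha256.Size]byte) ([sha256.Size]byte, bool) {
	inc, ok := ri.replacedBy[txHash]
	return inc, ok
}
```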
#### Transaction inclusion proofs
The result of a transaction query includes a Merkle proof of the existence of the
transaction in the blockchain. This [proof is built][inclusion-proof] as a Merkle tree
of the hashes of all of the transactions in the block where the queried transaction was executed.
To allow transaction replacement, these proofs would need to be updated to prove
that a replaced transaction was included by replacement in the block.
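The sketch below illustrates the problem with a simplified Merkle root over a block's
transactions (Tendermint's real tree in `crypto/merkle` uses RFC 6962-style leaf and
inner prefixes, which this version omits). Because only the replacement transaction
contributes a leaf, no proof can be built for the transactions it replaced unless the
scheme adds leaves, or keys, for them:
```go
package merklesketch

import "crypto/sha256"

// merkleRoot computes a simplified binary Merkle root over the given
// transactions. A proof of inclusion can only be produced for byte strings
// that contribute a leaf, which is why replaced-but-omitted transactions are
// unprovable without extra structure.
func merkleRoot(txs [][]byte) []byte {
	if len(txs) == 0 {
		return nil
	}
	level := make([][]byte, len(txs))
	for i, tx := range txs {
		h := sha256.Sum256(tx)
		level[i] = h[:]
	}
	for len(level) > 1 {
		var next [][]byte
		for i := 0; i < len(level); i += 2 {
			if i+1 == len(level) {
				// An odd node is promoted to the next level unchanged.
				next = append(next, level[i])
				continue
			}
			h := sha256.Sum256(append(level[i], level[i+1]...))
			next = append(next, h[:])
		}
		level = next
	}
	return level[0]
}
```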
#### RPC-based transaction query parameters and results
Tendermint's RPC allows clients to retrieve information about transactions via the
`/tx_search` and `/tx` RPC endpoints.
RPC query results containing replaced transactions would need to be updated to include
information on replaced transactions, either by returning results for all of the replaced
transactions, or by including a response with just the hashes of the replaced transactions
which clients could proceed to query individually.
#### Mempool transaction removal
Additional logic would need to be added to the Tendermint mempool to clear out replaced
transactions after each block is executed. Tendermint currently removes executed transactions
from the mempool, so this would be a pretty straightforward change.
## Discussion
### What value may be added to Tendermint by introducing transaction replacement?
Transaction replacement would enable applications to aggregate or disaggregate transactions.
For aggregation, a set of transactions that all perform related work, such as transferring
tokens between the same two accounts, could be replaced with a single transaction,
i.e. one that transfers a single sum from one account to the other.
Applications that make frequent use of aggregation may be able to achieve a higher throughput.
Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
more client-submitted transactions to be executed per block.
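A hypothetical illustration of the aggregation case: during `PrepareProposal`,
transfers between the same two accounts collapse into a single transfer of the summed
amount (the `Transfer` type and `aggregate` helper are invented for this example):
```go
package aggsketch

// Transfer is a toy stand-in for a client-submitted token transfer.
type Transfer struct {
	From, To string
	Amount   int64
}

// aggregate merges transfers that share the same (From, To) pair into one
// transfer carrying the summed amount, preserving first-seen order.
func aggregate(txs []Transfer) []Transfer {
	type pair struct{ from, to string }
	sums := make(map[pair]int64)
	var order []pair
	for _, tx := range txs {
		k := pair{tx.From, tx.To}
		if _, seen := sums[k]; !seen {
			order = append(order, k)
		}
		sums[k] += tx.Amount
	}
	out := make([]Transfer, 0, len(order))
	for _, k := range order {
		out = append(out, Transfer{From: k.from, To: k.to, Amount: sums[k]})
	}
	return out
}
```
Under this sketch, three transfers of 5, 7, and 3 tokens from the same sender to the
same recipient would occupy a single slot in the block as one transfer of 15.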
For disaggregation, a very complex transaction could be split into multiple smaller transactions.
This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts
of a multi-part transaction.
### Drawbacks to transaction replacement
Transaction replacement would require updating and shimming many of the places that
Tendermint records and exposes information about executed transactions. While
systems within Tendermint could be updated to account for transaction replacement,
such a system would leave new issues and rough edges.
#### No way of guaranteeing correct replacement
If a user issues a transaction to the network and the transaction is replaced, the
user has no guarantee that the replacement was correct. For example, suppose a set of users issue
transactions A, B, and C and they are all aggregated into a new transaction, D.
There is nothing guaranteeing that D was constructed correctly from the inputs.
The only way for users to ensure D is correct would be if D contained all of the
information of its constituent transactions, in which case, nothing is really gained by the replacement.
#### Replacement transactions not signed by submitter
Abstractly, Tendermint simply views transactions as a ball of bytes and therefore
should be fine with replacing one for another. However, many applications require
that transactions submitted to the chain be signed by some private key to authenticate
and authorize the transaction. Replaced transactions could not be signed by the
submitter, only by the application node. Therefore, any use of transaction replacement
could not contain authorization from the submitter and would need to grant
application-submitted transactions the power to perform application logic on behalf
of a user without their consent.
Granting this power to application-submitted transactions would be very dangerous
and therefore might not be of much value to application developers.
Transaction replacement might only be really safe in the case of application-submitted
transactions or for transactions that require no authorization. For such transactions,
it's not quite clear what the utility of replacement is: the application can already
generate any transactions that it wants. The fact that such a transaction was a replacement
is not particularly relevant to participants in the chain since the application is
merely replacing its own transactions.
#### New vector for censorship
Depending on the implementation, transaction replacement may allow a node to signal
to the rest of the chain that some transaction should no longer be considered for execution.
Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated.
Malicious nodes will be granted a new vector for censoring transactions.
There is no guarantee that a replaced transaction is actually executed at all.
A malicious node could censor a transaction by simply listing it as replaced.
Honest nodes seeing the replacement would flush the transaction from their mempool
and not execute or propose it in later blocks.
### Transaction tracking implementations
This section discusses possible ways to flesh out the implementation of transaction replacement.
Specifically, this section proposes a few alternative ways that Tendermint blockchains could
track and store transaction replacements.
#### Include transaction replacements in the block
One option to track transaction replacement is to include information on the
transaction replacement within the block. An additional structure may be added to
the block of the following form:
```proto
message Block {
...
repeated Replacement replacements = 5;
}
message Replacement {
bytes included_tx_key = 1;
repeated bytes replaced_txs_keys = 2;
}
```
Applications executing `PrepareProposal` would return the list of replacements and
Tendermint would include an encoding of these replacements in the block that is gossiped
and committed.
Tendermint's transaction indexing would include a new mapping for each replaced transaction
key to the committed transaction.
Transaction inclusion proofs would be updated to include these additional new transaction
keys in the Merkle tree and queries for transaction hashes that were replaced would return
information indicating that the transaction was replaced along with the hash of the
transaction that replaced it.
Block validation of gossiped blocks would be updated to check that each
`included_tx_key` matches the hash of some transaction in the proposed block.
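A sketch of that validation step, assuming hypothetical Go types mirroring the proto
message above (`Replacement` is not an existing Tendermint type):
```go
package blocksketch

import (
	"crypto/sha256"
	"fmt"
)

// Replacement mirrors the proto sketch above: one included transaction key
// plus the keys of the transactions it replaced.
type Replacement struct {
	IncludedTxKey   []byte
	ReplacedTxsKeys [][]byte
}

// validateReplacements checks that every replacement's included key matches
// the hash of some transaction actually present in the proposed block.
func validateReplacements(blockTxs [][]byte, reps []Replacement) error {
	present := make(map[[sha256.Size]byte]bool, len(blockTxs))
	for _, tx := range blockTxs {
		present[sha256.Sum256(tx)] = true
	}
	for _, rep := range reps {
		if len(rep.IncludedTxKey) != sha256.Size {
			return fmt.Errorf("malformed replacement key %x", rep.IncludedTxKey)
		}
		var key [sha256.Size]byte
		copy(key[:], rep.IncludedTxKey)
		if !present[key] {
			return fmt.Errorf("replacement key %x matches no transaction in block", rep.IncludedTxKey)
		}
	}
	return nil
}
```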
Implementing the changes described in this section would allow Tendermint to gossip
and index transaction replacements as part of block propagation. These changes would
still require the application to certify that the replacements were valid. This
validation may be performed in one of two ways:
1. **Applications optimistically trust that the proposer performed a legitimate replacement.**
In this validation scheme, applications would not verify that the substitution
is valid during consensus and instead simply trust that the proposer is correct.
This would have the drawback of allowing a malicious proposer to remove transactions
it did not want executed.
2. **Applications completely validate transaction replacement.**
In this validation scheme, applications that allow replacement would check that
each listed replaced transaction was correctly reflected in the replacement transaction.
In order to perform such validation, the node would need to have the replaced transactions
locally. This could be accomplished in one of a few ways: by querying the mempool,
by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
in the block. Replacement validation via mempool querying would require the node
to have received all of the replaced transactions in the mempool which is far from
guaranteed. Adding an additional gossip channel would make gossiping replaced transactions
a requirement for consensus to proceed, since all nodes would need to receive all replacement
messages before considering a block valid. Finally, including replaced transactions in
the block seems to obviate any benefit gained from performing a transaction replacement
since the replaced transaction and the original transactions would now both appear in the block.
#### Application defined transaction replacement
An additional option for allowing transaction replacement is to leave it entirely as a responsibility
of the application. The `PrepareProposal` ABCI++ call allows for applications to add
new transactions to a proposed block. Applications that wished to implement a transaction
replacement mechanism would be free to do so without the previously proposed `new_hashes` field.
Applications wishing to implement transaction replacement would add the aggregated
transactions in the `PrepareProposal` response, and include one additional bookkeeping
transaction that listed all of the replacements, with a similar scheme to the `new_hashes`
field described in ABCI++. This new bookkeeping transaction could be used by the
application to determine which transactions to clear from the mempool in future calls
to `CheckTx`.
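A hedged sketch of what such a bookkeeping transaction could look like; the JSON
encoding and field names are invented here, and a real application would pick its own
wire format:
```go
package appsketch

import "encoding/json"

// bookkeepingTx lists, for each replacement transaction included in the
// block, the hashes (hex-encoded) of the client transactions it replaced.
type bookkeepingTx struct {
	Replacements map[string][]string `json:"replacements"`
}

// encodeBookkeeping serializes the replacement map so that PrepareProposal
// can append it to the proposed block as one extra transaction.
func encodeBookkeeping(replacements map[string][]string) ([]byte, error) {
	return json.Marshal(bookkeepingTx{Replacements: replacements})
}

// replacedSet decodes a bookkeeping transaction from the last committed
// block; CheckTx/RecheckTx can consult the resulting set to evict replaced
// transactions from the mempool.
func replacedSet(raw []byte) (map[string]bool, error) {
	var bk bookkeepingTx
	if err := json.Unmarshal(raw, &bk); err != nil {
		return nil, err
	}
	out := make(map[string]bool)
	for _, hashes := range bk.Replacements {
		for _, h := range hashes {
			out[h] = true
		}
	}
	return out, nil
}
```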
The meaning of any transaction in the block is completely opaque to Tendermint,
so applications performing this style of replacement would not be able to have the replacement
reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing
and the `/tx` endpoint.
#### Application defined Tx Keys
Tendermint currently uses a cryptographic hash, SHA256, as the key for each transaction.
As noted in the section on systems that would require changing, this key is used
to identify the transaction in the mempool, in the indexer, and within the RPC system.
An alternative approach to allowing `ProcessProposal` to specify a set of transaction
replacements would be instead to allow the application to specify an additional key or set
of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set
would be included in the block and therefore gossiped during block propagation.
Additional RPC endpoints could be exposed to query by the application-defined keys.
Applications wishing to implement replacement would leverage this new field by providing the
replaced transaction hashes as the `secondary_keys` and checking their validity during
`ProcessProposal`. During `RecheckTx` the application would then be responsible for
clearing out transactions that matched the `secondary_keys`.
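Illustratively, the RPC layer might consult a lookup table from application-defined
secondary keys to the primary SHA256 key; the `secondaryIndex` structure below is
hypothetical and not an existing Tendermint component:
```go
package keysketch

import "crypto/sha256"

// secondaryIndex maps an application-defined key (for example, the hash of a
// replaced transaction) to the primary key under which the committed
// transaction is indexed.
type secondaryIndex map[string][sha256.Size]byte

// register records a secondary key for a committed transaction; the
// application would supply these during ProcessProposal.
func (si secondaryIndex) register(secondary []byte, primary [sha256.Size]byte) {
	si[string(secondary)] = primary
}

// resolve translates a queried secondary key into the primary key, reporting
// whether such a mapping exists; an RPC query by secondary key would fall
// back to the primary index using the returned key.
func (si secondaryIndex) resolve(secondary []byte) ([sha256.Size]byte, bool) {
	primary, ok := si[string(secondary)]
	return primary, ok
}
```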
It is worth noting that something like this would be possible without `secondary_keys`.
An application wishing to implement a system like this one could define a replacement
transaction, as discussed in the section on application-defined transaction replacement,
and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should
be indexed within Tendermint's ABCI event indexing.
### Complexity to value-add tradeoff
It is worth remarking that adding a system like this may introduce a decent amount
of new complexity into Tendermint. An approach that leaves much of the replacement
logic to Tendermint would require altering the core transaction indexing and querying
data. In many of the cases listed, a system for transaction replacement is possible
without explicitly defining it as part of `PrepareProposal`. Since applications
can now add transactions during `PrepareProposal` they can and should leverage this
functionality to include additional bookkeeping transactions in the block. It may
be worth encouraging applications to discover new and interesting ways to leverage this
power instead of immediately solving the problem for them.
### References
[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67
[tx-search-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267
[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21
[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90
[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168

go.mod (+1 -1)

@@ -26,7 +26,7 @@ require (
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.6


go.sum (+2 -1)

@@ -940,8 +940,9 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=


internal/consensus/common_test.go (+7 -2)

@@ -69,6 +69,9 @@ func configSetup(t *testing.T) *config.Config {
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) })
walDir := filepath.Dir(cfg.Consensus.WalFile())
ensureDir(t, walDir, 0700)
return cfg
}
@@ -787,6 +790,7 @@ func makeConsensusState(
configOpts ...func(*config.Config),
) ([]*State, cleanupFunc) {
t.Helper()
tempDir := t.TempDir()
valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
@@ -801,7 +805,7 @@
blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(tempDir, fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -810,7 +814,8 @@
opt(thisConfig)
}
ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
walDir := filepath.Dir(thisConfig.Consensus.WalFile())
ensureDir(t, walDir, 0700)
app := kvstore.NewApplication()
closeFuncs = append(closeFuncs, app.Close)


internal/consensus/reactor_test.go (+0 -3)

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"os"
"path"
"sync"
"testing"
"time"
@@ -466,7 +465,6 @@ func TestReactorWithEvidence(t *testing.T) {
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := kvstore.NewApplication()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -564,7 +562,6 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
c.Consensus.CreateEmptyBlocks = false
},
)
t.Cleanup(cleanup)
rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock


internal/consensus/state.go (+25 -12)

@@ -20,6 +20,7 @@ import (
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/internal/libs/autofile"
sm "github.com/tendermint/tendermint/internal/state"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
@@ -869,15 +870,27 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
defer func() {
if r := recover(); r != nil {
cs.logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
// stop gracefully
//
// NOTE: We most probably shouldn't be running any further when there is
// some unexpected panic. Some unknown error happened, and so we don't
// know if that will result in the validator signing an invalid thing. It
// might be worthwhile to explore a mechanism for manual resuming via
// some console or secure RPC system, but for now, halting the chain upon
// unexpected consensus bugs sounds like the better option.
// Make a best-effort attempt to close the WAL, but otherwise do not
// attempt to gracefully terminate. Once consensus has irrecoverably
// failed, any additional progress we permit the node to make may
// complicate diagnosing and recovering from the failure.
onExit(cs)
// Re-panic to ensure the node terminates.
//
// TODO(creachadair): In ordinary operation, the WAL autofile should
// never be closed. This only happens during shutdown and production
// nodes usually halt by panicking. Many existing tests, however,
// assume a clean shutdown is possible. Prior to #8111, we were
// swallowing the panic in receiveRoutine, making that appear to
// work. Filtering this specific error is slightly risky, but should
// affect only unit tests. In any case, not re-panicking here only
// preserves the pre-existing behavior for this one error type.
if err, ok := r.(error); ok && errors.Is(err, autofile.ErrAutoFileClosed) {
return
}
panic(r)
}
}()
@@ -906,8 +919,8 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
case mi := <-cs.internalMsgQueue:
err := cs.wal.WriteSync(mi) // NOTE: fsync
if err != nil {
panic(fmt.Sprintf(
"failed to write %v msg to consensus WAL due to %v; check your file system and restart the node",
panic(fmt.Errorf(
"failed to write %v msg to consensus WAL due to %w; check your file system and restart the node",
mi, err,
))
}
@@ -1906,8 +1919,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
// restart).
endMsg := EndHeightMessage{height}
if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync
panic(fmt.Sprintf(
"failed to write %v msg to consensus WAL due to %v; check your file system and restart the node",
panic(fmt.Errorf(
"failed to write %v msg to consensus WAL due to %w; check your file system and restart the node",
endMsg, err,
))
}


internal/libs/autofile/autofile.go (+5 -5)

@@ -41,9 +41,9 @@
autoFilePerms = os.FileMode(0600)
)
// errAutoFileClosed is reported when operations attempt to use an autofile
// ErrAutoFileClosed is reported when operations attempt to use an autofile
// after it has been closed.
var errAutoFileClosed = errors.New("autofile is closed")
var ErrAutoFileClosed = errors.New("autofile is closed")
// AutoFile automatically closes and re-opens file for writing. The file is
// automatically setup to close itself every 1s and upon receiving SIGHUP.
@@ -155,7 +155,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) {
af.mtx.Lock()
defer af.mtx.Unlock()
if af.closed {
return 0, fmt.Errorf("write: %w", errAutoFileClosed)
return 0, fmt.Errorf("write: %w", ErrAutoFileClosed)
}
if af.file == nil {
@@ -174,7 +174,7 @@ func (af *AutoFile) Write(b []byte) (n int, err error) {
func (af *AutoFile) Sync() error {
return af.withLock(func() error {
if af.closed {
return fmt.Errorf("sync: %w", errAutoFileClosed)
return fmt.Errorf("sync: %w", ErrAutoFileClosed)
} else if af.file == nil {
return nil // nothing to sync
}
@@ -207,7 +207,7 @@ func (af *AutoFile) Size() (int64, error) {
af.mtx.Lock()
defer af.mtx.Unlock()
if af.closed {
return 0, fmt.Errorf("size: %w", errAutoFileClosed)
return 0, fmt.Errorf("size: %w", ErrAutoFileClosed)
}
if af.file == nil {


internal/p2p/pex/reactor.go (+93 -134)

@@ -3,14 +3,12 @@ package pex
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/libs/service"
protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
@@ -42,7 +40,7 @@ const (
minReceiveRequestInterval = 100 * time.Millisecond
// the maximum amount of addresses that can be included in a response
maxAddresses uint16 = 100
maxAddresses = 100
// How long to wait when there are no peers available before trying again
noAvailablePeersWaitPeriod = 1 * time.Second
@@ -100,15 +98,8 @@ type Reactor struct {
// minReceiveRequestInterval).
lastReceivedRequests map[types.NodeID]time.Time
// keep track of how many new peers to existing peers we have received to
// extrapolate the size of the network
newPeers uint32
totalPeers uint32
// discoveryRatio is the inverse ratio of new peers to old peers squared.
// This is multiplied by the minimum duration to calculate how long to wait
// between each request.
discoveryRatio float32
// the total number of unique peers added
totalPeers int
}
// NewReactor returns a reference to a new reactor.
@@ -156,16 +147,6 @@ func (r *Reactor) OnStop() {}
// processPexCh implements a blocking event loop where we listen for p2p
// Envelope messages from the pexCh.
func (r *Reactor) processPexCh(ctx context.Context) {
timer := time.NewTimer(0)
defer timer.Stop()
r.mtx.Lock()
var (
duration = r.calculateNextRequestTime()
err error
)
r.mtx.Unlock()
incoming := make(chan *p2p.Envelope)
go func() {
defer close(incoming)
@@ -179,36 +160,51 @@ }
}
}()
// Initially, we will request peers quickly to bootstrap. This duration
// will be adjusted upward as knowledge of the network grows.
var nextPeerRequest = minReceiveRequestInterval
timer := time.NewTimer(0)
defer timer.Stop()
for {
timer.Reset(duration)
timer.Reset(nextPeerRequest)
select {
case <-ctx.Done():
return
// outbound requests for new peers
case <-timer.C:
duration, err = r.sendRequestForPeers(ctx)
if err != nil {
// Send a request for more peer addresses.
if err := r.sendRequestForPeers(ctx); err != nil {
return
// TODO(creachadair): Do we really want to stop processing the PEX
// channel just because of an error here?
}
// inbound requests for new peers or responses to requests sent by this
// reactor
// Note we do not update the poll timer upon making a request, only
// when we receive an update that updates our priors.
case envelope, ok := <-incoming:
if !ok {
return
return // channel closed
}
duration, err = r.handleMessage(ctx, r.pexCh.ID, envelope)
// A request from another peer, or a response to one of our requests.
dur, err := r.handlePexMessage(ctx, envelope)
if err != nil {
r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err)
r.logger.Error("failed to process message",
"ch_id", r.pexCh.ID, "envelope", envelope, "err", err)
if serr := r.pexCh.SendError(ctx, p2p.PeerError{
NodeID: envelope.From,
Err: err,
}); serr != nil {
return
}
} else if dur != 0 {
// We got a useful result; update the poll timer.
nextPeerRequest = dur
}
}
}
}
@@ -228,19 +224,20 @@ }
}
// handlePexMessage handles envelopes sent from peers on the PexChannel.
// If an update was received, a new polling interval is returned; otherwise the
// duration is 0.
func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) (time.Duration, error) {
logger := r.logger.With("peer", envelope.From)
switch msg := envelope.Message.(type) {
case *protop2p.PexRequest:
// check if the peer hasn't sent a prior request too close to this one
// in time
// Verify that this peer hasn't sent us another request too recently.
if err := r.markPeerRequest(envelope.From); err != nil {
return time.Minute, err
return 0, err
}
// request peers from the peer manager and parse the NodeAddresses into
// URL strings
// Fetch peers from the peer manager, convert NodeAddresses into URL
// strings, and send them back to the caller.
nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses)
pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses))
for idx, addr := range nodeAddresses {
@@ -248,28 +245,24 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope)
URL: addr.String(),
}
}
if err := r.pexCh.Send(ctx, p2p.Envelope{
return 0, r.pexCh.Send(ctx, p2p.Envelope{
To: envelope.From,
Message: &protop2p.PexResponse{Addresses: pexAddresses},
}); err != nil {
return 0, err
}
})
return time.Second, nil
case *protop2p.PexResponse:
// check if the response matches a request that was made to that peer
// Verify that this response corresponds to one of our pending requests.
if err := r.markPeerResponse(envelope.From); err != nil {
return time.Minute, err
return 0, err
}
// check the size of the response
if len(msg.Addresses) > int(maxAddresses) {
return 10 * time.Minute, fmt.Errorf("peer sent too many addresses (max: %d, got: %d)",
maxAddresses,
len(msg.Addresses),
)
// Verify that the response does not exceed the safety limit.
if len(msg.Addresses) > maxAddresses {
return 0, fmt.Errorf("peer sent too many addresses (%d > maxiumum %d)",
len(msg.Addresses), maxAddresses)
}
var numAdded int
for _, pexAddress := range msg.Addresses {
peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL)
if err != nil {
@@ -278,45 +271,19 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope)
added, err := r.peerManager.Add(peerAddress)
if err != nil {
logger.Error("failed to add PEX address", "address", peerAddress, "err", err)
continue
}
if added {
r.newPeers++
numAdded++
logger.Debug("added PEX address", "address", peerAddress)
}
r.totalPeers++
}
return 10 * time.Minute, nil
default:
return time.Second, fmt.Errorf("received unknown message: %T", msg)
}
}
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (duration time.Duration, err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()
r.logger.Debug("received PEX message", "peer", envelope.From)
return r.calculateNextRequestTime(numAdded), nil
switch chID {
case p2p.ChannelID(PexChannel):
duration, err = r.handlePexMessage(ctx, envelope)
default:
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
return 0, fmt.Errorf("received unknown message: %T", msg)
}
return
}
// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we
@@ -338,95 +305,87 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
}
}
// sendRequestForPeers pops the first peerID off the list and sends the
// peer a request for more peer addresses. The function then moves the
// peer into the requestsSent bucket and calculates when the next request
// time should be
func (r *Reactor) sendRequestForPeers(ctx context.Context) (time.Duration, error) {
// sendRequestForPeers chooses a peer from the set of available peers and sends
// that peer a request for more peer addresses. The chosen peer is moved into
// the requestsSent bucket so that we will not attempt to contact them again
// until they've replied or updated.
func (r *Reactor) sendRequestForPeers(ctx context.Context) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if len(r.availablePeers) == 0 {
// no peers are available
r.logger.Debug("no available peers to send request to, waiting...")
return noAvailablePeersWaitPeriod, nil
r.logger.Debug("no available peers to send a PEX request to (retrying)")
return nil
}
var peerID types.NodeID
// use range to get a random peer.
// Select an arbitrary peer from the available set.
var peerID types.NodeID
for peerID = range r.availablePeers {
break
}
// send out the pex request
if err := r.pexCh.Send(ctx, p2p.Envelope{
To: peerID,
Message: &protop2p.PexRequest{},
}); err != nil {
return 0, err
return err
}
// remove the peer from the abvailable peers list and mark it in the requestsSent map
// Move the peer from available to pending.
delete(r.availablePeers, peerID)
r.requestsSent[peerID] = struct{}{}
dur := r.calculateNextRequestTime()
r.logger.Debug("peer request sent", "next_request_time", dur)
return dur, nil
return nil
}
// calculateNextRequestTime implements something of a proportional controller
// to estimate how often the reactor should be requesting new peer addresses.
// The dependent variable in this calculation is the ratio of new peers to
// all peers that the reactor receives. The interval is thus calculated as the
// inverse squared. In the beginning, all peers should be new peers.
// We expect this ratio to be near 1 and thus the interval to be as short
// as possible. As the node becomes more familiar with the network the ratio of
// new nodes will plummet to a very small number, meaning the interval expands
// to its upper bound.
// calculateNextRequestTime selects how long we should wait before attempting
// to send out another request for peer addresses.
//
// This implements a simplified proportional control mechanism to poll more
// often when our knowledge of the network is incomplete, and less often as our
// knowledge grows. To estimate our knowledge of the network, we use the
// fraction of "new" peers (addresses we have not previously seen) to the total
// so far observed. When we first join the network, this fraction will be close
// to 1, meaning most new peers are "new" to us, and as we discover more peers,
// the fraction will go toward zero.
//
// CONTRACT: The caller must hold r.mtx exclusively when calling this method.
func (r *Reactor) calculateNextRequestTime() time.Duration {
// check if the peer store is full. If so then there is no need
// to send peer requests too often
// The minimum interval will be minReceiveRequestInterval to ensure we will not
// request from any peer more often than we would allow them to do from us.
func (r *Reactor) calculateNextRequestTime(added int) time.Duration {
r.mtx.Lock()
defer r.mtx.Unlock()
r.totalPeers += added
// If the peer store is nearly full, wait the maximum interval.
if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
r.logger.Debug("peer manager near full ratio, sleeping...",
r.logger.Debug("Peer manager is nearly full",
"sleep_period", fullCapacityInterval, "ratio", ratio)
return fullCapacityInterval
}
// baseTime represents the shortest interval that we can send peer requests
// in. For example if we have 10 peers and we can't send a message to the
// same peer every 500ms, then we can send a request every 50ms. In practice
// we use a safety margin of 2, ergo 100ms
peers := tmmath.MinInt(len(r.availablePeers), 50)
baseTime := minReceiveRequestInterval
if peers > 0 {
baseTime = minReceiveRequestInterval * 2 / time.Duration(peers)
// If there are no available peers to query, poll less aggressively.
if len(r.availablePeers) == 0 {
r.logger.Debug("No available peers to send a PEX request",
"sleep_period", noAvailablePeersWaitPeriod)
return noAvailablePeersWaitPeriod
}
if r.totalPeers > 0 || r.discoveryRatio == 0 {
// find the ratio of new peers. NOTE: We add 1 to both sides to avoid
// divide by zero problems
ratio := float32(r.totalPeers+1) / float32(r.newPeers+1)
// square the ratio in order to get non linear time intervals
// NOTE: The longest possible interval for a network with 100 or more peers
// where a node is connected to 50 of them is 2 minutes.
r.discoveryRatio = ratio * ratio
r.newPeers = 0
r.totalPeers = 0
}
// NOTE: As ratio is always >= 1, discovery ratio is >= 1. Therefore we don't need to worry
// about the next request time being less than the minimum time
return baseTime * time.Duration(r.discoveryRatio)
// Reaching here, there are available peers to query and the peer store
// still has space. Estimate our knowledge of the network from the latest
// update and choose a new interval.
base := float64(minReceiveRequestInterval) / float64(len(r.availablePeers))
multiplier := float64(r.totalPeers+1) / float64(added+1) // +1 to avert zero division
return time.Duration(base*multiplier*multiplier) + minReceiveRequestInterval
}
func (r *Reactor) markPeerRequest(peer types.NodeID) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
if time.Now().Before(lastRequestTime.Add(minReceiveRequestInterval)) {
return fmt.Errorf("peer sent a request too close after a prior one. Minimum interval: %v",
minReceiveRequestInterval)
if d := time.Since(lastRequestTime); d < minReceiveRequestInterval {
return fmt.Errorf("peer %v sent PEX request too soon (%v < minimum %v)",
peer, d, minReceiveRequestInterval)
}
}
r.lastReceivedRequests[peer] = time.Now()


internal/p2p/pex/reactor_test.go (+1 -1)

@@ -96,7 +96,7 @@ func TestReactorSendsRequestsTooOften(t *testing.T) {
peerErr := <-r.pexErrCh
require.Error(t, peerErr.Err)
require.Empty(t, r.pexOutCh)
require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
require.Contains(t, peerErr.Err.Error(), "sent PEX request too soon")
require.Equal(t, badNode, peerErr.NodeID)
}


node/node.go (+2 -1)

@@ -274,9 +274,10 @@ func makeNode(
return nil, combineCloseError(err, makeCloser(closers))
}
evReactor, evPool, err := createEvidenceReactor(ctx,
evReactor, evPool, edbCloser, err := createEvidenceReactor(ctx,
cfg, dbProvider, stateStore, blockStore, peerManager, router, logger, nodeMetrics.evidence, eventBus,
)
closers = append(closers, edbCloser)
if err != nil {
return nil, combineCloseError(err, makeCloser(closers))
}


node/setup.go (+5 -4)

@@ -220,11 +220,12 @@ func createEvidenceReactor(
logger log.Logger,
metrics *evidence.Metrics,
eventBus *eventbus.EventBus,
) (*evidence.Reactor, *evidence.Pool, error) {
) (*evidence.Reactor, *evidence.Pool, closer, error) {
evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg})
if err != nil {
return nil, nil, fmt.Errorf("unable to initialize evidence db: %w", err)
return nil, nil, func() error { return nil }, fmt.Errorf("unable to initialize evidence db: %w", err)
}
dbCloser := evidenceDB.Close
logger = logger.With("module", "evidence")
@@ -238,10 +239,10 @@ func createEvidenceReactor(
evidencePool,
)
if err != nil {
return nil, nil, fmt.Errorf("creating evidence reactor: %w", err)
return nil, nil, dbCloser, fmt.Errorf("creating evidence reactor: %w", err)
}
return evidenceReactor, evidencePool, nil
return evidenceReactor, evidencePool, dbCloser, nil
}
func createConsensusReactor(


proto/Dockerfile (+0 -20)

@@ -1,20 +0,0 @@
# This Dockerfile defines an image containing tools for linting, formatting,
# and compiling the Tendermint protos.
FROM golang:1.17-alpine
# Install a commonly used set of programs for use with our protos.
# clang-extra-tools is included here because it provides clang-format,
# used to format the .proto files.
RUN apk add --no-cache build-base clang-extra-tools curl git
ENV GOLANG_PROTOBUF_VERSION=1.3.1 \
GOGO_PROTOBUF_VERSION=1.3.2
# Retrieve the go protoc programs and copy them into the PATH
RUN go install github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} && \
go install github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} && \
go install github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \
mv "$(go env GOPATH)"/bin/* /usr/local/bin/
# Copy the 'buf' program out of the buildbuf/buf container.
COPY --from=bufbuild/buf:latest /usr/local/bin/* /usr/local/bin/

proto/buf.lock (+7 -0)

@@ -0,0 +1,7 @@
# Generated by buf. DO NOT EDIT.
version: v1
deps:
- remote: buf.build
owner: gogo
repository: protobuf
commit: 4df00b267f944190a229ce3695781e99

buf.yaml → proto/buf.yaml (renamed)


proto/tendermint/blocksync/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.blocksync;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync";
import "tendermint/types/block.proto";
// BlockRequest requests a block for a specific height


proto/tendermint/consensus/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.consensus;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus";
import "gogoproto/gogo.proto";
import "tendermint/types/types.proto";
import "tendermint/libs/bits/types.proto";


proto/tendermint/consensus/wal.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.consensus;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus";
import "gogoproto/gogo.proto";
import "tendermint/consensus/types.proto";
import "tendermint/types/events.proto";


proto/tendermint/crypto/keys.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.crypto;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto";
import "gogoproto/gogo.proto";
// PublicKey defines the keys available for use with Tendermint Validators


proto/tendermint/crypto/proof.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.crypto;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto";
import "gogoproto/gogo.proto";
message Proof {


proto/tendermint/libs/bits/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.libs.bits;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits";
message BitArray {
int64 bits = 1;
repeated uint64 elems = 2;


proto/tendermint/mempool/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.mempool;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool";
message Txs {
repeated bytes txs = 1;
}


proto/tendermint/p2p/conn.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.p2p;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p";
import "gogoproto/gogo.proto";
import "tendermint/crypto/keys.proto";


proto/tendermint/p2p/pex.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.p2p;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p";
import "gogoproto/gogo.proto";
message PexAddress {


proto/tendermint/p2p/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.p2p;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p";
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";


proto/tendermint/statesync/types.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.statesync;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync";
import "gogoproto/gogo.proto";
import "tendermint/types/types.proto";
import "tendermint/types/params.proto";


proto/tendermint/types/block.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "tendermint/types/types.proto";
import "tendermint/types/evidence.proto";


proto/tendermint/types/canonical.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "tendermint/types/types.proto";
import "google/protobuf/timestamp.proto";


proto/tendermint/types/events.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
message EventDataRoundState {
int64 height = 1;
int32 round = 2;


proto/tendermint/types/evidence.proto (+2 -0)

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "tendermint/types/types.proto";


+ 2
- 0
proto/tendermint/types/params.proto View File

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "google/protobuf/duration.proto";


+ 1
- 1
proto/tendermint/types/types.proto View File

@@ -1,7 +1,7 @@
syntax = "proto3";
package tendermint.types;
-option go_package = "github.com/tendermint/tendermint/types";
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";


+ 2
- 0
proto/tendermint/types/validator.proto View File

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.types;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
import "gogoproto/gogo.proto";
import "tendermint/crypto/keys.proto";


+ 2
- 0
proto/tendermint/version/types.proto View File

@@ -1,6 +1,8 @@
syntax = "proto3";
package tendermint.version;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/version";
import "gogoproto/gogo.proto";
// Consensus captures the consensus rules for processing a block in the


+ 0
- 44
scripts/protocgen.sh View File

@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# By default, this script runs against the latest commit to the master branch
# in the Tendermint spec repository. To use this script with a different version
# of the spec repository, run it with the $VERS environment variable set to the
# desired branch name or commit hash from the spec repo.
: ${VERS:=master}
echo "fetching proto files"
# Get shortened ref of commit
REF=$(curl -H "Accept: application/vnd.github.v3.sha" -qL \
"https://api.github.com/repos/tendermint/spec/commits/${VERS}" \
| cut -c -7)
readonly OUTDIR="tendermint-spec-${REF}"
curl -qL "https://api.github.com/repos/tendermint/spec/tarball/${REF}" | tar -xzf - ${OUTDIR}/
cp -r ${OUTDIR}/proto/tendermint/* ./proto/tendermint
cp -r ${OUTDIR}/third_party/** ./third_party
MODNAME="$(go list -m)"
find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \
-exec sh ./scripts/protopackage.sh {} "$MODNAME" ';'
# For historical compatibility, the abci file needs to get a slightly different import name
# so that it can be moved into the ./abci/types directory.
sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types"
buf generate --path proto/tendermint --template ./${OUTDIR}/buf.gen.yaml --config ./${OUTDIR}/buf.yaml
mv ./proto/tendermint/abci/types.pb.go ./abci/types
echo "proto files have been compiled"
echo "removing copied files"
find ${OUTDIR}/proto/tendermint/ -name *.proto \
| sed "s/$OUTDIR\/\(.*\)/\1/g" \
| xargs -I {} rm {}
rm -rf ${OUTDIR}

+ 1
- 1
scripts/protopackage.sh View File

@@ -16,7 +16,7 @@ if [[ ! -z "$3" ]]; then
fi
-if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then
+if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then
sed -i "s/\(package tendermint.*\)/\1\n\noption go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME
else
sed -i "s/option\s\+go_package\s\+=\s\+.*;/option go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME


+ 1
- 1
spec/abci++/abci++_basic_concepts_002_draft.md View File

@@ -214,7 +214,7 @@ transactions and compute the same results.
Some Applications may choose to execute the blocks that are about to be proposed
(via `PrepareProposal`), or those that the Application is asked to validate
-(via `Processproposal`). However, the state changes caused by processing those
+(via `ProcessProposal`). However, the state changes caused by processing those
proposed blocks must never replace the previous state until `FinalizeBlock` confirms
the block decided.
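One way an Application might honor this rule — a minimal sketch under assumed names (App, State, applyTx are illustrative), not the spec's prescribed design: speculative execution results are kept per block hash and adopted only in FinalizeBlock.

```go
package app

// State is stand-in application data.
type State struct {
	Balances map[string]int64
}

type App struct {
	committed State            // state as of the last decided block
	candidate map[string]State // speculative results, keyed by block hash
}

// ProcessProposal may execute the proposed txs, but only against a copy;
// the committed state stays untouched.
func (a *App) ProcessProposal(blockHash string, txs [][]byte) bool {
	s := cloneState(a.committed)
	for _, tx := range txs {
		applyTx(&s, tx)
	}
	a.candidate[blockHash] = s // remember the result, but do not adopt it
	return true
}

// FinalizeBlock is the first point at which the previous state may be replaced.
func (a *App) FinalizeBlock(blockHash string) {
	if s, ok := a.candidate[blockHash]; ok {
		a.committed = s
	}
	a.candidate = map[string]State{} // discard results for competing proposals
}

func cloneState(s State) State {
	c := State{Balances: make(map[string]int64, len(s.Balances))}
	for k, v := range s.Balances {
		c.Balances[k] = v
	}
	return c
}

func applyTx(s *State, tx []byte) {
	// Stand-in execution: credit an account named by the tx bytes.
	s.Balances[string(tx)]++
}
```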


+ 0
- 147
third_party/proto/gogoproto/gogo.proto View File

@@ -1,147 +0,0 @@
// Protocol Buffers for Go with Gadgets
//
// Copied from https://github.com/gogo/protobuf/blob/master/gogoproto/gogo.proto
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package gogoproto;
import "google/protobuf/descriptor.proto";
option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";
extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
optional bool enumdecl = 62024;
}
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
extend google.protobuf.FileOptions {
optional bool goproto_getters_all = 63001;
optional bool goproto_enum_prefix_all = 63002;
optional bool goproto_stringer_all = 63003;
optional bool verbose_equal_all = 63004;
optional bool face_all = 63005;
optional bool gostring_all = 63006;
optional bool populate_all = 63007;
optional bool stringer_all = 63008;
optional bool onlyone_all = 63009;
optional bool equal_all = 63013;
optional bool description_all = 63014;
optional bool testgen_all = 63015;
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
optional bool goproto_enum_stringer_all = 63021;
optional bool enum_stringer_all = 63022;
optional bool unsafe_marshaler_all = 63023;
optional bool unsafe_unmarshaler_all = 63024;
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
optional bool compare_all = 63029;
optional bool typedecl_all = 63030;
optional bool enumdecl_all = 63031;
optional bool goproto_registration = 63032;
optional bool messagename_all = 63033;
optional bool goproto_sizecache_all = 63034;
optional bool goproto_unkeyed_all = 63035;
}
extend google.protobuf.MessageOptions {
optional bool goproto_getters = 64001;
optional bool goproto_stringer = 64003;
optional bool verbose_equal = 64004;
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
optional bool description = 64014;
optional bool testgen = 64015;
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
optional bool unsafe_marshaler = 64023;
optional bool unsafe_unmarshaler = 64024;
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
optional bool compare = 64029;
optional bool typedecl = 64030;
optional bool messagename = 64033;
optional bool goproto_sizecache = 64034;
optional bool goproto_unkeyed = 64035;
}
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
optional bool stdtime = 65010;
optional bool stdduration = 65011;
optional bool wktpointer = 65012;
optional string castrepeated = 65013;
}
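These extension points are what `[(gogoproto.*) = ...]` annotations in the Tendermint protos refer to. For context on why this file was vendored at all, a hedged sketch of the effect two common field options have on generated Go code — the types below are illustrative stand-ins, not actual generated output:

```go
package example

import "time"

// Timestamp stands in for the google.protobuf.Timestamp wrapper message.
type Timestamp struct {
	Seconds int64
	Nanos   int32
}

// Standard protoc-gen-go output (sketch): timestamp fields are pointers to
// the wrapper message, so callers must nil-check and convert.
type HeaderStd struct {
	Time *Timestamp
}

// gogoproto output (sketch) when the field carries
// [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]:
// the field becomes a plain time.Time value, no nil-checks needed.
type HeaderGogo struct {
	Time time.Time
}
```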
