@@ -1,17 +1,19 @@
name: Check Markdown links
# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
on:
  push:
    branches:
      - master
  pull_request:
    branches: [master]
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
        with:
          check-modified-files-only: 'yes'
#name: Check Markdown links
#
#on:
#  push:
#    branches:
#      - master
#  pull_request:
#    branches: [master]
#
#jobs:
#  markdown-link-check:
#    runs-on: ubuntu-latest
#    steps:
#      - uses: actions/checkout@v3
#      - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
#        with:
#          check-modified-files-only: 'yes'
@@ -1,24 +0,0 @@
name: Proto Check
# This workflow runs buf (https://buf.build/) lint and breakage checks.
# It is only run when a file in the proto directory
# has been modified.
on:
  workflow_dispatch: # allow running workflow manually
  pull_request:
    paths:
      - "proto/*"
jobs:
  proto-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@v2.4.0
      - name: lint
        run: make proto-lint
  proto-breakage:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@v2.4.0
      - name: check-breakage
        run: make proto-check-breaking-ci
@@ -1,64 +0,0 @@
# This workflow (re)builds and pushes a Docker image containing the
# protobuf build tools used by the other workflows.
#
# When making changes that require updates to the builder image, you
# should merge the updates first and wait for this workflow to complete,
# so that the changes will be available for the dependent workflows.
#
name: Build & Push Proto Builder Image
on:
  pull_request:
    paths:
      - "proto/*"
  push:
    branches:
      - master
    paths:
      - "proto/*"
  schedule:
    # run this job once a month to receive any Go or buf updates
    - cron: "0 9 1 * *"
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: tendermint/docker-build-proto
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2.4.0
      - name: Check out and assign tags
        id: prep
        run: |
          DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
          VERSION=noop
          if [[ "$GITHUB_REF" == refs/tags/* ]]; then
            VERSION="${GITHUB_REF#refs/tags/}"
          elif [[ "$GITHUB_REF" == refs/heads/* ]]; then
            VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
            if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
              VERSION=latest
            fi
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION}"
          echo ::set-output name=tags::"${TAGS}"
      - name: Set up docker buildx
        uses: docker/setup-buildx-action@v1.6.0
      - name: Log in to the container registry
        uses: docker/login-action@v1.13.0
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and publish image
        uses: docker/build-push-action@v2.9.0
        with:
          context: ./proto
          file: ./proto/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.prep.outputs.tags }}
@@ -0,0 +1,21 @@
name: Protobuf Lint
on:
  pull_request:
    paths:
      - 'proto/**'
  push:
    branches:
      - master
    paths:
      - 'proto/**'
jobs:
  lint:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
      - uses: bufbuild/buf-setup-action@v1.1.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
@@ -1,33 +0,0 @@
package abciclient

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

// Creator creates new ABCI clients.
type Creator func(log.Logger) (Client, error)

// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
	return func(logger log.Logger) (Client, error) {
		return NewLocalClient(logger, app), nil
	}
}

// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
	return func(log.Logger) (Client, error) {
		remoteApp, err := NewClient(logger, addr, transport, mustConnect)
		if err != nil {
			return nil, fmt.Errorf("failed to connect to proxy: %w", err)
		}
		return remoteApp, nil
	}
}
@@ -1,85 +0,0 @@
package abciclient_test

import (
	"context"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

func TestProperSyncCalls(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	app := slowApp{}
	logger := log.NewNopLogger()

	_, c := setupClientServer(ctx, t, logger, app)

	resp := make(chan error, 1)
	go func() {
		rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
		assert.NoError(t, err)
		assert.NoError(t, c.Flush(ctx))
		assert.NotNil(t, rsp)
		select {
		case <-ctx.Done():
		case resp <- c.Error():
		}
	}()

	select {
	case <-time.After(time.Second):
		require.Fail(t, "No response arrived")
	case err, ok := <-resp:
		require.True(t, ok, "Must not close channel")
		assert.NoError(t, err, "This should return success")
	}
}

func setupClientServer(
	ctx context.Context,
	t *testing.T,
	logger log.Logger,
	app types.Application,
) (service.Service, abciclient.Client) {
	t.Helper()

	// some port between 20k and 30k
	port := 20000 + rand.Int31()%10000
	addr := fmt.Sprintf("localhost:%d", port)

	s, err := server.NewServer(logger, addr, "socket", app)
	require.NoError(t, err)
	require.NoError(t, s.Start(ctx))
	t.Cleanup(s.Wait)

	c := abciclient.NewSocketClient(logger, addr, true)
	require.NoError(t, c.Start(ctx))
	t.Cleanup(c.Wait)

	require.True(t, s.IsRunning())
	require.True(t, c.IsRunning())

	return s, c
}

type slowApp struct {
	types.BaseApplication
}

func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	time.Sleep(200 * time.Millisecond)
	return types.ResponseFinalizeBlock{}
}
@@ -1,10 +1,10 @@
echo hello
info
commit
deliver_tx "abc"
finalize_block "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
finalize_block "def=xyz" "ghi=123"
commit
query "def"
@@ -1,7 +1,7 @@
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01
finalize_block 0x04
info
@@ -0,0 +1,74 @@
package types_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

func TestHashAndProveResults(t *testing.T) {
	trs := []*abci.ExecTxResult{
		// Note, these tests rely on the first two entries being in this order.
		{Code: 0, Data: nil},
		{Code: 0, Data: []byte{}},
		{Code: 0, Data: []byte("one")},
		{Code: 14, Data: nil},
		{Code: 14, Data: []byte("foo")},
		{Code: 14, Data: []byte("bar")},
	}

	// Nil and []byte{} should produce the same bytes.
	bz0, err := trs[0].Marshal()
	require.NoError(t, err)
	bz1, err := trs[1].Marshal()
	require.NoError(t, err)
	require.Equal(t, bz0, bz1)

	// Make sure that we can get a root hash from results and verify proofs.
	rs, err := abci.MarshalTxResults(trs)
	require.NoError(t, err)
	root := merkle.HashFromByteSlices(rs)
	assert.NotEmpty(t, root)

	_, proofs := merkle.ProofsFromByteSlices(rs)
	for i, tr := range trs {
		bz, err := tr.Marshal()
		require.NoError(t, err)

		valid := proofs[i].Verify(root, bz)
		assert.NoError(t, valid, "%d", i)
	}
}

func TestHashDeterministicFieldsOnly(t *testing.T) {
	tr1 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: abc",
		Info:      "nondeterministic data: abc",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.abc",
	}
	tr2 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: def",
		Info:      "nondeterministic data: def",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.def",
	}
	r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
	require.NoError(t, err)
	r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
	require.NoError(t, err)
	require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
}
@@ -1,14 +1,9 @@
# The version of the generation template (required).
# The only currently-valid value is v1beta1.
version: v1beta1
# The plugins to run.
version: v1
plugins:
  # The name of the plugin.
  - name: gogofaster
    # The directory where the generated proto output will be written.
    # The directory is relative to where the generation tool was run.
    out: proto
    # Set options to assign import paths to the well-known types
    # and to enable service generation.
    opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
    out: ./proto/
    opt:
      - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
      - plugins=grpc
      - paths=source_relative
@@ -0,0 +1,3 @@
version: v1
directories:
  - proto
@@ -0,0 +1,201 @@
# ADR 081: Protocol Buffers Management

## Changelog

- 2022-02-28: First draft

## Status

Accepted

[Tracking issue](https://github.com/tendermint/tendermint/issues/8121)

## Context

At present, we manage the [Protocol Buffers] schema files ("protos") that define
our wire-level data formats within the Tendermint repository itself (see the
[`proto`](../../proto/) directory). Recently, we have been making use of [Buf],
both locally and in CI, in order to generate Go stubs, and lint and check
`.proto` files for breaking changes.

The version of Buf used at the time of this decision was `v1beta1`, and it was
discussed in [\#7975] and in weekly calls as to whether we should upgrade to
`v1` and harmonize our approach with that used by the Cosmos SDK. The team
managing the Cosmos SDK was primarily interested in having our protos versioned
and easily accessible from the [Buf] registry.

The three main sets of stakeholders for the `.proto` files and their needs, as
currently understood, are as follows.

1. Tendermint needs Go code generated from `.proto` files.
2. Consumers of Tendermint's `.proto` files, specifically projects that want to
   interoperate with Tendermint and need to generate code for their own
   programming language, want to be able to access these files in a reliable and
   efficient way.
3. The Tendermint Core team wants to provide stable interfaces that are as easy
   as possible to maintain, on which consumers can depend, and to be able to
   notify those consumers promptly when those interfaces change. To this end, we
   want to:
   1. Prevent any breaking changes from being introduced in minor/patch releases
      of Tendermint. Only major version updates should be able to contain
      breaking interface changes.
   2. Prevent generated code from diverging from the Protobuf schema files.

There was also discussion surrounding the notion of automated documentation
generation and hosting, but it is not clear at this time whether this would be
that valuable to any of our stakeholders. What would, of course, be valuable at
a minimum is better documentation (in comments) of the `.proto` files
themselves.

## Alternative Approaches

### Meeting stakeholders' needs

1. Go stub generation from protos. We could use:
   1. [Buf]. This approach has been rather cumbersome up to this point, and it
      is not clear what Buf really provides beyond that which `protoc` provides
      to justify the additional complexity in configuring Buf for stub
      generation.
   2. [protoc] - the Protocol Buffers compiler.
2. Notification of breaking changes:
   1. Buf in CI for all pull requests to *release* branches only (and not on
      `master`).
   2. Buf in CI on every pull request to every branch (this was the case at the
      time of this decision, and the team decided that the signal-to-noise ratio
      for this approach was too low to be of value).
3. `.proto` linting:
   1. Buf in CI on every pull request
4. `.proto` formatting:
   1. [clang-format] locally and a [clang-format GitHub Action] in CI to check
      that files are formatted properly on every pull request.
5. Sharing of `.proto` files in a versioned, reliable manner:
   1. Consumers could simply clone the Tendermint repository, check out a
      specific commit, tag or branch and manually copy out all of the `.proto`
      files they need. This requires no effort from the Tendermint Core team and
      will continue to be an option for consumers. The drawback of this approach
      is that it requires manual coding/scripting to implement and is brittle in
      the face of bigger changes.
   2. Uploading our `.proto` files to Buf's registry on every release. This is
      by far the most seamless for consumers of our `.proto` files, but requires
      the dependency on Buf. This has the additional benefit that the Buf
      registry will automatically [generate and host
      documentation][buf-docs-gen] for these protos.
   3. We could create a process that, upon release, creates a `.zip` file
      containing our `.proto` files.

### Popular alternatives to Buf

[Prototool] was not considered as it appears deprecated, and the ecosystem seems
to be converging on Buf at this time.

### Tooling complexity

The more tools we have in our build/CI processes, the more complex and fragile
repository/CI management becomes, and the longer it takes to onboard new team
members. Maintainability is a core concern here.

### Buf sustainability and costs

One of the primary considerations regarding the usage of Buf is whether, for
example, access to its registry will eventually become a
paid-for/subscription-based service and whether this is valuable enough for us
and the ecosystem to pay for such a service. At this time, it appears as though
Buf will never charge for hosting open source projects' protos.

Another consideration was Buf's sustainability as a project - what happens when
their resources run out? Will there be a strong and broad enough open source
community to continue maintaining it?

### Local Buf usage options

Local usage of Buf (i.e. not in CI) can be accomplished in two ways:

1. Installing the relevant tools individually.
2. By way of its [Docker image][buf-docker].

Local installation of Buf requires developers to manually keep their toolchains
up-to-date. The Docker option comes with a number of complexities, including
how the file system permissions of code generated by a Docker container differ
between platforms (e.g. on Linux, Buf-generated code ends up being owned by
`root`).

The trouble with the Docker-based approach is that we make use of the
[gogoprotobuf] plugin for `protoc`. Continuing to use the Docker-based approach
to using Buf will mean that we will have to continue building our own custom
Docker image with embedded gogoprotobuf.

Along these lines, we could eventually consider coming up with a [Nix]- or
[redo]-based approach to developer tooling to ensure tooling consistency across
the team and for anyone who wants to be able to contribute to Tendermint.

## Decision

1. We will adopt Buf for now for proto generation, linting, breakage checking
   and its registry (mainly in CI, with optional usage locally).
2. Failing CI when checking for breaking changes in `.proto` files will only
   happen when performing minor/patch releases.
3. Local tooling will be favored over Docker-based tooling.

## Detailed Design

We currently aim to:

1. Update to Buf `v1` to facilitate linting, breakage checking and uploading to
   the Buf registry.
2. Configure CI appropriately for proto management:
   1. Uploading protos to the Buf registry on every release (e.g. the
      [approach][cosmos-sdk-buf-registry-ci] used by the Cosmos SDK).
   2. Linting on every pull request (e.g. the
      [approach][cosmos-sdk-buf-linting-ci] used by the Cosmos SDK). The linter
      passing should be considered a requirement for accepting PRs.
   3. Checking for breaking changes in minor/patch version releases and failing
      CI accordingly - see [\#8003].
   4. Add [clang-format GitHub Action] to check `.proto` file formatting. Format
      checking should be considered a requirement for accepting PRs.
3. Update the Tendermint [`Makefile`](../../Makefile) to primarily facilitate
   local Protobuf stub generation, linting, formatting and breaking change
   checking. More specifically:
   1. This includes removing the dependency on Docker and introducing the
      dependency on local toolchain installation. CI-based equivalents, where
      relevant, will rely on specific GitHub Actions instead of the Makefile.
   2. Go code generation will rely on `protoc` directly.

## Consequences

### Positive

- We will still offer Go stub generation, proto linting and breakage checking.
- Breakage checking will only happen on minor/patch releases to increase the
  signal-to-noise ratio in CI.
- Versioned protos will be made available via Buf's registry upon every release.

### Negative

- Developers/contributors will need to install the relevant Protocol
  Buffers-related tooling (Buf, gogoprotobuf, clang-format) locally in order to
  build, lint, format and check `.proto` files for breaking changes.

### Neutral

## References

- [Protocol Buffers]
- [Buf]
- [\#7975]
- [protoc] - The Protocol Buffers compiler

[Protocol Buffers]: https://developers.google.com/protocol-buffers
[Buf]: https://buf.build/
[\#7975]: https://github.com/tendermint/tendermint/pull/7975
[protoc]: https://github.com/protocolbuffers/protobuf
[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
[clang-format GitHub Action]: https://github.com/marketplace/actions/clang-format-github-action
[buf-docker]: https://hub.docker.com/r/bufbuild/buf
[cosmos-sdk-buf-registry-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto-registry.yml
[cosmos-sdk-buf-linting-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto.yml#L15
[\#8003]: https://github.com/tendermint/tendermint/issues/8003
[Nix]: https://nixos.org/
[gogoprotobuf]: https://github.com/gogo/protobuf
[Prototool]: https://github.com/uber/prototool
[buf-docs-gen]: https://docs.buf.build/bsr/documentation
[redo]: https://redo.readthedocs.io/en/latest/
@@ -0,0 +1,261 @@
# RFC 015: ABCI++ TX Mutation

## Changelog

- 23-Feb-2022: Initial draft (@williambanfield).
- 28-Feb-2022: Revised draft (@williambanfield).

## Abstract

A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions
in the proposed block. This scheme required the proposer to construct new transactions
and mark these new transactions as replacing other removed transactions. The specification
was ambiguous as to how the replacement may be communicated to peer nodes.
This RFC discusses issues with this mechanism and possible solutions.

## Background

### What is the proposed change?

A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing
transactions in a proposed block. To replace a transaction, the application running
`ProcessProposal` could mark a transaction as replaced by other application-supplied
transactions by returning a new transaction marked with the `ADDED` flag and setting
the `new_hashes` field of the removed transaction to contain the list of transaction hashes
that replace it. In that previous specification for ABCI++, the full use of the
`new_hashes` field was left somewhat ambiguous. At present, these hashes are not
gossiped and are not eventually included in the block to signal replacement to
other nodes. The specification did indicate that the transactions specified in
the `new_hashes` field will be removed from the mempool, but it's not clear how
peer nodes will learn about them.

### What systems would be affected by adding transaction replacement?

The 'transaction' is a central building block of a Tendermint blockchain, so adding
a mechanism for transaction replacement would require changes to many aspects of Tendermint.

The following is a rough list of the functionality that this mechanism would affect:

#### Transaction indexing

Tendermint's indexer stores transactions and transaction results using the hash of the executed
transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value.

To allow transaction replacement, the replaced transactions would need to be stored as well in the
indexer, likely as a mapping from the original transaction to the list of transaction hashes that
replaced the original transaction.

#### Transaction inclusion proofs

The result of a transaction query includes a Merkle proof of the existence of the
transaction in the blockchain. This [proof is built][inclusion-proof] as a Merkle tree
of the hashes of all of the transactions in the block where the queried transaction was executed.

To allow transaction replacement, these proofs would need to be updated to prove
that a replaced transaction was included by replacement in the block.

#### RPC-based transaction query parameters and results

Tendermint's RPC allows clients to retrieve information about transactions via the
`/tx_search` and `/tx` RPC endpoints.

RPC query results containing replaced transactions would need to be updated to include
information on replaced transactions, either by returning results for all of the replaced
transactions, or by including a response with just the hashes of the replaced transactions
which clients could proceed to query individually.

#### Mempool transaction removal

Additional logic would need to be added to the Tendermint mempool to clear out replaced
transactions after each block is executed. Tendermint currently removes executed transactions
from the mempool, so this would be a pretty straightforward change; a sketch follows below.
## Discussion

### What value may be added to Tendermint by introducing transaction replacement?

Transaction replacement would enable applications to aggregate or disaggregate transactions.

For aggregation, a set of transactions that all perform related work, such as transferring
tokens between the same two accounts, could be replaced with a single transaction,
i.e. one that transfers a single sum from one account to the other.
Applications that make frequent use of aggregation may be able to achieve a higher throughput.
Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
more client-submitted transactions to be executed per block.

For disaggregation, a very complex transaction could be split into multiple smaller transactions.
This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts
of a multi-part transaction.

### Drawbacks to transaction replacement

Transaction replacement would require updating and shimming many of the places that
Tendermint records and exposes information about executed transactions. While
systems within Tendermint could be updated to account for transaction replacement,
such a system would leave new issues and rough edges.

#### No way of guaranteeing correct replacement

If a user issues a transaction to the network and the transaction is replaced, the
user has no guarantee that the replacement was correct. For example, suppose a set of users issue
transactions A, B, and C and they are all aggregated into a new transaction, D.
There is nothing guaranteeing that D was constructed correctly from the inputs.
The only way for users to ensure D is correct would be if D contained all of the
information of its constituent transactions, in which case, nothing is really gained by the replacement.

#### Replacement transactions not signed by submitter

Abstractly, Tendermint simply views transactions as a ball of bytes and therefore
should be fine with replacing one for another. However, many applications require
that transactions submitted to the chain be signed by some private key to authenticate
and authorize the transaction. Replaced transactions could not be signed by the
submitter, only by the application node. Therefore, any use of transaction replacement
would need to grant application-submitted transactions the power to perform
application logic on behalf of a user without their consent.

Granting this power to application-submitted transactions would be very dangerous
and therefore might not be of much value to application developers.
Transaction replacement might only be really safe in the case of application-submitted
transactions or for transactions that require no authorization. For such transactions,
it's not quite clear what the utility of replacement is: the application can already
generate any transactions that it wants. The fact that such a transaction was a replacement
is not particularly relevant to participants in the chain since the application is
merely replacing its own transactions.

#### New vector for censorship

Depending on the implementation, transaction replacement may allow a node to signal
to the rest of the chain that some transaction should no longer be considered for execution.
Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated.
Malicious nodes will be granted a new vector for censoring transactions.
There is no guarantee that a replaced transaction is actually executed at all.
A malicious node could censor a transaction by simply listing it as replaced.
Honest nodes seeing the replacement would flush the transaction from their mempool
and not execute or propose it in later blocks.

### Transaction tracking implementations

This section discusses possible ways to flesh out the implementation of transaction replacement.
Specifically, this section proposes a few alternative ways that Tendermint blockchains could
track and store transaction replacements.

#### Include transaction replacements in the block

One option to track transaction replacement is to include information on the
transaction replacement within the block. An additional structure may be added
to the block of the following form:

```proto
message Block {
  ...
  repeated Replacement replacements = 5;
}

message Replacement {
  bytes included_tx_key = 1;
  repeated bytes replaced_txs_keys = 2;
}
```

Applications executing `PrepareProposal` would return the list of replacements and
Tendermint would include an encoding of these replacements in the block that is gossiped
and committed.

Tendermint's transaction indexing would include a new mapping for each replaced transaction
key to the committed transaction, along the lines of the sketch below.
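For illustration only, one hypothetical shape for that mapping is sketched here in Go; the type and helper names are invented for exposition and are not part of Tendermint.

```go
package indexext

// replacementIndex is a hypothetical mapping from a replaced transaction's
// hash to the hash of the committed transaction that replaced it.
type replacementIndex map[[32]byte][32]byte

// recordReplacements indexes one Replacement message: every replaced key
// points at the included (committed) transaction key, so a later query for a
// replaced hash can report which transaction superseded it.
func recordReplacements(idx replacementIndex, includedTxKey [32]byte, replacedTxsKeys [][32]byte) {
	for _, k := range replacedTxsKeys {
		idx[k] = includedTxKey
	}
}
```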
Transaction inclusion proofs would be updated to include these additional new transaction
keys in the Merkle tree, and queries for transaction hashes that were replaced would return
information indicating that the transaction was replaced along with the hash of the
transaction that replaced it.

Block validation of gossiped blocks would be updated to check that each of the
`included_tx_key` fields matches the hash of some transaction in the proposed block.

Implementing the changes described in this section would allow Tendermint to gossip
and index transaction replacements as part of block propagation. These changes would
still require the application to certify that the replacements were valid. This
validation may be performed in one of two ways:

1. **Applications optimistically trust that the proposer performed a legitimate replacement.**

   In this validation scheme, applications would not verify that the substitution
   is valid during consensus and instead simply trust that the proposer is correct.
   This would have the drawback of allowing a malicious proposer to remove transactions
   it did not want executed.

2. **Applications completely validate transaction replacement.**

   In this validation scheme, applications that allow replacement would check that
   each listed replaced transaction was correctly reflected in the replacement transaction.
   In order to perform such validation, the node would need to have the replaced transactions
   locally. This could be accomplished one of a few ways: by querying the mempool,
   by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
   in the block. Replacement validation via mempool querying would require the node
   to have received all of the replaced transactions in the mempool, which is far from
   guaranteed. Adding an additional gossip channel would make gossiping replaced transactions
   a requirement for consensus to proceed, since all nodes would need to receive all replacement
   messages before considering a block valid. Finally, including replaced transactions in
   the block seems to obviate any benefit gained from performing a transaction replacement,
   since the replaced transaction and the original transactions would now both appear in the block.

#### Application defined transaction replacement

An additional option for allowing transaction replacement is to leave it entirely as a responsibility
of the application. The `PrepareProposal` ABCI++ call allows applications to add
new transactions to a proposed block. Applications that wished to implement a transaction
replacement mechanism would be free to do so without the newly defined `new_hashes` field.
Applications wishing to implement transaction replacement would add the aggregated
transactions in the `PrepareProposal` response, and include one additional bookkeeping
transaction that listed all of the replacements, with a similar scheme to the `new_hashes`
field described in ABCI++. This new bookkeeping transaction could be used by the
application to determine which transactions to clear from the mempool in future calls
to `CheckTx`.

The meaning of any transaction in the block is completely opaque to Tendermint,
so applications performing this style of replacement would not be able to have the replacement
reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing
and the `/tx` endpoint.

#### Application defined Tx Keys

Tendermint currently uses cryptographic hashes, SHA256, as a key for each transaction.
As noted in the section on systems that would require changing, this key is used
to identify the transaction in the mempool, in the indexer, and within the RPC system.

An alternative approach to allowing `ProcessProposal` to specify a set of transaction
replacements would be instead to allow the application to specify an additional key or set
of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set
would be included in the block and therefore gossiped during block propagation.
Additional RPC endpoints could be exposed to query by the application-defined keys.

Applications wishing to implement replacement would leverage this new field by providing the
replaced transaction hashes as the `secondary_keys` and checking their validity during
`ProcessProposal`. During `RecheckTx` the application would then be responsible for
clearing out transactions that matched the `secondary_keys`, as in the sketch below.
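A rough Go sketch of that recheck-time eviction follows; the `secondary_keys` field is the hypothetical one proposed above, and every type and helper here is invented for illustration rather than drawn from Tendermint's API.

```go
package appext

// txKey mirrors the SHA-256 transaction key discussed above.
type txKey [32]byte

// replacedSet collects the hypothetical secondary_keys the application has
// seen in committed blocks: the hashes of originals that were replaced.
type replacedSet map[txKey]struct{}

// shouldEvict reports whether a transaction being rechecked was one of the
// replaced originals; if so, the application could return a non-zero code
// during recheck so the mempool drops it.
func (s replacedSet) shouldEvict(key txKey) bool {
	_, ok := s[key]
	return ok
}
```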
It is worth noting that something like this would be possible without `secondary_keys`. | |||
An application wishing to implement a system like this one could define a replacement | |||
transaction, as discussed in the section on application-defined transaction replacement, | |||
and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should | |||
be indexed within Tendermint's ABCI event indexing. | |||
### Complexity to value-add tradeoff | |||
It is worth remarking that adding a system like this may introduce a decent amount | |||
of new complexity into Tendermint. An approach that leaves much of the replacement | |||
logic to Tendermint would require altering the core transaction indexing and querying | |||
data. In many of the cases listed, a system for transaction replacement is possible | |||
without explicitly defining it as part of `PrepareProposal`. Since applications | |||
can now add transactions during `PrepareProposal` they can and should leverage this | |||
functionality to include additional bookkeeping transactions in the block. It may | |||
be worth encouraging applications to discover new and interesting ways to leverage this | |||
power instead of immediately solving the problem for them. | |||
### References | |||
[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67 | |||
[tx-serach-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267 | |||
[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21 | |||
[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90 | |||
[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168 |
@ -0,0 +1,95 @@ | |||
--- | |||
order: 3 | |||
--- | |||
# PBTS | |||
This document provides an overview of the Proposer-Based Timestamp (PBTS) | |||
algorithm added to Tendermint in the v0.36 release. It outlines the core | |||
functionality as well as the parameters and constraints of the this algorithm. | |||
## Algorithm Overview | |||
The PBTS algorithm defines a way for a Tendermint blockchain to create block | |||
timestamps that are within a reasonable bound of the clocks of the validators on | |||
the network. This replaces the original BFTTime algorithm for timestamp | |||
assignment that relied on the timestamps included in precommit messages. | |||
## Algorithm Parameters | |||
The functionality of the PBTS algorithm is governed by two parameters within | |||
Tendermint. These two parameters are [consensus | |||
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291), | |||
meaning they are configured by the ABCI application and are expected to be the | |||
same across all nodes on the network. | |||
### `Precision` | |||
The `Precision` parameter configures the acceptable upper-bound of clock drift | |||
among all of the nodes on a Tendermint network. Any two nodes on a Tendermint | |||
network are expected to have clocks that differ by at most `Precision` | |||
milliseconds any given instant. | |||
### `MessageDelay` | |||
The `MessageDelay` parameter configures the acceptable upper-bound for | |||
transmitting a `Proposal` message from the proposer to _all_ of the validators | |||
on the network. | |||
Networks should choose as small a value for `MessageDelay` as is practical, | |||
provided it is large enough that messages can reach all participants with high | |||
probability given the number of participants and latency of their connections. | |||
## Algorithm Concepts | |||
### Block timestamps | |||
Each block produced by the Tendermint consensus engine contains a timestamp. | |||
The timestamp produced in each block is a meaningful representation of time that is | |||
useful for the protocols and applications built on top of Tendermint. | |||
The following protocols and application features require a reliable source of time: | |||
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. | |||
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). | |||
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 | |||
days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). | |||
* IBC packets can use either a [timestamp or a height to timeout packet | |||
delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements) | |||
### Proposer Selects a Block Timestamp | |||
When the proposer node creates a new block proposal, the node reads the time | |||
from its local clock and uses this reading as the timestamp for the proposed | |||
block. | |||
### Timeliness | |||
When each validator on a Tendermint network receives a proposed block, it | |||
performs a series of checks to ensure that the block can be considered valid as | |||
a candidate to be the next block in the chain. | |||
The PBTS algorithm performs a validity check on the timestamp of proposed | |||
blocks. When a validator receives a proposal it ensures that the timestamp in | |||
the proposal is within a bound of the validator's local clock. Specifically, the | |||
algorithm checks that the timestamp is no more than `Precision` greater than the | |||
node's local clock and no less than `Precision` + `MessageDelay` behind than the | |||
node's local clock. This creates range of acceptable timestamps around the | |||
node's local time. If the timestamp is within this range, the PBTS algorithm | |||
considers the block **timely**. If a block is not **timely**, the node will | |||
issue a `nil` `prevote` for this block, signaling to the rest of the network | |||
that the node does not consider the block to be valid. | |||
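To make the acceptable window concrete, here is a minimal Go sketch of the check just described; the function name and signature are illustrative and do not mirror Tendermint's internal API.

```go
package pbtsexample

import "time"

// isTimely reports whether a proposed block timestamp falls within the
// acceptable window around the validator's local clock:
//
//	localTime - (precision + msgDelay) <= proposalTime <= localTime + precision
func isTimely(proposalTime, localTime time.Time, precision, msgDelay time.Duration) bool {
	lowerBound := localTime.Add(-(precision + msgDelay)) // oldest acceptable timestamp
	upperBound := localTime.Add(precision)               // newest acceptable timestamp
	return !proposalTime.Before(lowerBound) && !proposalTime.After(upperBound)
}
```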
### Clock Synchronization

The PBTS algorithm requires that the clocks of the validators on a Tendermint network
be within `Precision` of each other. In practice, this means that validators
should periodically synchronize to a reliable NTP server. Validators that drift
too far away from the rest of the network will no longer propose blocks with
valid timestamps. Additionally, they will not consider the timestamps of blocks
proposed by their peers to be valid either.

## See Also

* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md)
  contains all of the details of the algorithm.
@@ -1,3 +1,4 @@
master master
v0.33.x v0.33
v0.34.x v0.34
v0.35.x v0.35
@@ -1,46 +0,0 @@
package mock

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/libs/clist"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/types"
)

// Mempool is an empty implementation of a Mempool, useful for testing.
type Mempool struct{}

var _ mempool.Mempool = Mempool{}

func (Mempool) Lock()     {}
func (Mempool) Unlock()   {}
func (Mempool) Size() int { return 0 }
func (Mempool) CheckTx(context.Context, types.Tx, func(*abci.ResponseCheckTx), mempool.TxInfo) error {
	return nil
}
func (Mempool) RemoveTxByKey(txKey types.TxKey) error   { return nil }
func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (Mempool) ReapMaxTxs(n int) types.Txs              { return types.Txs{} }
func (Mempool) Update(
	_ context.Context,
	_ int64,
	_ types.Txs,
	_ []*abci.ResponseDeliverTx,
	_ mempool.PreCheckFunc,
	_ mempool.PostCheckFunc,
) error {
	return nil
}
func (Mempool) Flush()                                 {}
func (Mempool) FlushAppConn(ctx context.Context) error { return nil }
func (Mempool) TxsAvailable() <-chan struct{}          { return make(chan struct{}) }
func (Mempool) EnableTxsAvailable()                    {}
func (Mempool) SizeBytes() int64                       { return 0 }

func (Mempool) TxsFront() *clist.CElement    { return nil }
func (Mempool) TxsWaitChan() <-chan struct{} { return nil }

func (Mempool) InitWAL() error { return nil }
func (Mempool) CloseWAL()      {}
@ -0,0 +1,172 @@ | |||
// Code generated by mockery. DO NOT EDIT. | |||
package mocks | |||
import ( | |||
context "context" | |||
abcitypes "github.com/tendermint/tendermint/abci/types" | |||
mempool "github.com/tendermint/tendermint/internal/mempool" | |||
mock "github.com/stretchr/testify/mock" | |||
types "github.com/tendermint/tendermint/types" | |||
) | |||
// Mempool is an autogenerated mock type for the Mempool type | |||
type Mempool struct { | |||
mock.Mock | |||
} | |||
// CheckTx provides a mock function with given fields: ctx, tx, callback, txInfo | |||
func (_m *Mempool) CheckTx(ctx context.Context, tx types.Tx, callback func(*abcitypes.ResponseCheckTx), txInfo mempool.TxInfo) error { | |||
ret := _m.Called(ctx, tx, callback, txInfo) | |||
var r0 error | |||
if rf, ok := ret.Get(0).(func(context.Context, types.Tx, func(*abcitypes.ResponseCheckTx), mempool.TxInfo) error); ok { | |||
r0 = rf(ctx, tx, callback, txInfo) | |||
} else { | |||
r0 = ret.Error(0) | |||
} | |||
return r0 | |||
} | |||
// EnableTxsAvailable provides a mock function with given fields: | |||
func (_m *Mempool) EnableTxsAvailable() { | |||
_m.Called() | |||
} | |||
// Flush provides a mock function with given fields: | |||
func (_m *Mempool) Flush() { | |||
_m.Called() | |||
} | |||
// FlushAppConn provides a mock function with given fields: _a0 | |||
func (_m *Mempool) FlushAppConn(_a0 context.Context) error { | |||
ret := _m.Called(_a0) | |||
var r0 error | |||
if rf, ok := ret.Get(0).(func(context.Context) error); ok { | |||
r0 = rf(_a0) | |||
} else { | |||
r0 = ret.Error(0) | |||
} | |||
return r0 | |||
} | |||
// Lock provides a mock function with given fields: | |||
func (_m *Mempool) Lock() { | |||
_m.Called() | |||
} | |||
// ReapMaxBytesMaxGas provides a mock function with given fields: maxBytes, maxGas | |||
func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { | |||
ret := _m.Called(maxBytes, maxGas) | |||
var r0 types.Txs | |||
if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { | |||
r0 = rf(maxBytes, maxGas) | |||
} else { | |||
if ret.Get(0) != nil { | |||
r0 = ret.Get(0).(types.Txs) | |||
} | |||
} | |||
return r0 | |||
} | |||
// ReapMaxTxs provides a mock function with given fields: max | |||
func (_m *Mempool) ReapMaxTxs(max int) types.Txs { | |||
ret := _m.Called(max) | |||
var r0 types.Txs | |||
if rf, ok := ret.Get(0).(func(int) types.Txs); ok { | |||
r0 = rf(max) | |||
} else { | |||
if ret.Get(0) != nil { | |||
r0 = ret.Get(0).(types.Txs) | |||
} | |||
} | |||
return r0 | |||
} | |||
// RemoveTxByKey provides a mock function with given fields: txKey | |||
func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { | |||
ret := _m.Called(txKey) | |||
var r0 error | |||
if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { | |||
r0 = rf(txKey) | |||
} else { | |||
r0 = ret.Error(0) | |||
} | |||
return r0 | |||
} | |||
// Size provides a mock function with given fields: | |||
func (_m *Mempool) Size() int { | |||
ret := _m.Called() | |||
var r0 int | |||
if rf, ok := ret.Get(0).(func() int); ok { | |||
r0 = rf() | |||
} else { | |||
r0 = ret.Get(0).(int) | |||
} | |||
return r0 | |||
} | |||
// SizeBytes provides a mock function with given fields: | |||
func (_m *Mempool) SizeBytes() int64 { | |||
ret := _m.Called() | |||
var r0 int64 | |||
if rf, ok := ret.Get(0).(func() int64); ok { | |||
r0 = rf() | |||
} else { | |||
r0 = ret.Get(0).(int64) | |||
} | |||
return r0 | |||
} | |||
// TxsAvailable provides a mock function with given fields: | |||
func (_m *Mempool) TxsAvailable() <-chan struct{} { | |||
ret := _m.Called() | |||
var r0 <-chan struct{} | |||
if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { | |||
r0 = rf() | |||
} else { | |||
if ret.Get(0) != nil { | |||
r0 = ret.Get(0).(<-chan struct{}) | |||
} | |||
} | |||
return r0 | |||
} | |||
// Unlock provides a mock function with given fields: | |||
func (_m *Mempool) Unlock() { | |||
_m.Called() | |||
} | |||
// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn | |||
func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { | |||
ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) | |||
var r0 error | |||
if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { | |||
r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) | |||
} else { | |||
r0 = ret.Error(0) | |||
} | |||
return r0 | |||
} |
@ -1,249 +0,0 @@ | |||
package proxy | |||
import ( | |||
"context" | |||
"time" | |||
"github.com/go-kit/kit/metrics" | |||
abciclient "github.com/tendermint/tendermint/abci/client" | |||
"github.com/tendermint/tendermint/abci/types" | |||
) | |||
//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot | |||
//---------------------------------------------------------------------------------------- | |||
// Enforce which abci msgs can be sent on a connection at the type level | |||
type AppConnConsensus interface { | |||
Error() error | |||
InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) | |||
PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) | |||
ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) | |||
ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) | |||
VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) | |||
FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) | |||
Commit(context.Context) (*types.ResponseCommit, error) | |||
} | |||
type AppConnMempool interface { | |||
Error() error | |||
CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) | |||
Flush(context.Context) error | |||
} | |||
type AppConnQuery interface { | |||
Error() error | |||
Echo(context.Context, string) (*types.ResponseEcho, error) | |||
Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) | |||
Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error) | |||
} | |||
type AppConnSnapshot interface { | |||
Error() error | |||
ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) | |||
OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) | |||
LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) | |||
ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) | |||
} | |||
//----------------------------------------------------------------------------------------- | |||
// Implements AppConnConsensus (subset of abciclient.Client) | |||
type appConnConsensus struct { | |||
metrics *Metrics | |||
appConn abciclient.Client | |||
} | |||
var _ AppConnConsensus = (*appConnConsensus)(nil) | |||
func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus { | |||
return &appConnConsensus{ | |||
metrics: metrics, | |||
appConn: appConn, | |||
} | |||
} | |||
func (app *appConnConsensus) Error() error { | |||
return app.appConn.Error() | |||
} | |||
func (app *appConnConsensus) InitChain( | |||
ctx context.Context, | |||
req types.RequestInitChain, | |||
) (*types.ResponseInitChain, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() | |||
return app.appConn.InitChain(ctx, req) | |||
} | |||
func (app *appConnConsensus) PrepareProposal( | |||
ctx context.Context, | |||
req types.RequestPrepareProposal, | |||
) (*types.ResponsePrepareProposal, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() | |||
return app.appConn.PrepareProposal(ctx, req) | |||
} | |||
func (app *appConnConsensus) ProcessProposal( | |||
ctx context.Context, | |||
req types.RequestProcessProposal, | |||
) (*types.ResponseProcessProposal, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() | |||
return app.appConn.ProcessProposal(ctx, req) | |||
} | |||
func (app *appConnConsensus) ExtendVote( | |||
ctx context.Context, | |||
req types.RequestExtendVote, | |||
) (*types.ResponseExtendVote, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() | |||
return app.appConn.ExtendVote(ctx, req) | |||
} | |||
func (app *appConnConsensus) VerifyVoteExtension( | |||
ctx context.Context, | |||
req types.RequestVerifyVoteExtension, | |||
) (*types.ResponseVerifyVoteExtension, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() | |||
return app.appConn.VerifyVoteExtension(ctx, req) | |||
} | |||
func (app *appConnConsensus) FinalizeBlock( | |||
ctx context.Context, | |||
req types.RequestFinalizeBlock, | |||
) (*types.ResponseFinalizeBlock, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() | |||
return app.appConn.FinalizeBlock(ctx, req) | |||
} | |||
func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() | |||
return app.appConn.Commit(ctx) | |||
} | |||
//------------------------------------------------ | |||
// Implements AppConnMempool (subset of abciclient.Client) | |||
type appConnMempool struct { | |||
metrics *Metrics | |||
appConn abciclient.Client | |||
} | |||
func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool { | |||
return &appConnMempool{ | |||
metrics: metrics, | |||
appConn: appConn, | |||
} | |||
} | |||
func (app *appConnMempool) Error() error { | |||
return app.appConn.Error() | |||
} | |||
func (app *appConnMempool) Flush(ctx context.Context) error { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() | |||
return app.appConn.Flush(ctx) | |||
} | |||
func (app *appConnMempool) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() | |||
return app.appConn.CheckTx(ctx, req) | |||
} | |||
//------------------------------------------------ | |||
// Implements AppConnQuery (subset of abciclient.Client) | |||
type appConnQuery struct { | |||
metrics *Metrics | |||
appConn abciclient.Client | |||
} | |||
func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery { | |||
return &appConnQuery{ | |||
metrics: metrics, | |||
appConn: appConn, | |||
} | |||
} | |||
func (app *appConnQuery) Error() error { | |||
return app.appConn.Error() | |||
} | |||
func (app *appConnQuery) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() | |||
return app.appConn.Echo(ctx, msg) | |||
} | |||
func (app *appConnQuery) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() | |||
return app.appConn.Info(ctx, req) | |||
} | |||
func (app *appConnQuery) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() | |||
return app.appConn.Query(ctx, reqQuery) | |||
} | |||
//------------------------------------------------ | |||
// Implements AppConnSnapshot (subset of abciclient.Client) | |||
type appConnSnapshot struct { | |||
metrics *Metrics | |||
appConn abciclient.Client | |||
} | |||
func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot { | |||
return &appConnSnapshot{ | |||
metrics: metrics, | |||
appConn: appConn, | |||
} | |||
} | |||
func (app *appConnSnapshot) Error() error { | |||
return app.appConn.Error() | |||
} | |||
func (app *appConnSnapshot) ListSnapshots( | |||
ctx context.Context, | |||
req types.RequestListSnapshots, | |||
) (*types.ResponseListSnapshots, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() | |||
return app.appConn.ListSnapshots(ctx, req) | |||
} | |||
func (app *appConnSnapshot) OfferSnapshot( | |||
ctx context.Context, | |||
req types.RequestOfferSnapshot, | |||
) (*types.ResponseOfferSnapshot, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() | |||
return app.appConn.OfferSnapshot(ctx, req) | |||
} | |||
func (app *appConnSnapshot) LoadSnapshotChunk( | |||
ctx context.Context, | |||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() | |||
return app.appConn.LoadSnapshotChunk(ctx, req) | |||
} | |||
func (app *appConnSnapshot) ApplySnapshotChunk( | |||
ctx context.Context, | |||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() | |||
return app.appConn.ApplySnapshotChunk(ctx, req) | |||
} | |||
// addTimeSample returns a function that, when called, adds an observation to m. | |||
// The observation added to m is the number of seconds elapsed since addTimeSample | |||
// was initially called. addTimeSample is meant to be called in a defer to calculate | |||
// the amount of time a function takes to complete. | |||
func addTimeSample(m metrics.Histogram) func() { | |||
start := time.Now() | |||
return func() { m.Observe(time.Since(start).Seconds()) } | |||
} |
@ -1,42 +1,213 @@ | |||
package proxy | |||
import ( | |||
"context" | |||
"io" | |||
"os" | |||
"syscall" | |||
"time" | |||
"github.com/go-kit/kit/metrics" | |||
abciclient "github.com/tendermint/tendermint/abci/client" | |||
"github.com/tendermint/tendermint/abci/example/kvstore" | |||
"github.com/tendermint/tendermint/abci/types" | |||
"github.com/tendermint/tendermint/libs/log" | |||
"github.com/tendermint/tendermint/libs/service" | |||
e2e "github.com/tendermint/tendermint/test/e2e/app" | |||
) | |||
// DefaultClientCreator returns a default ClientCreator, which will create a | |||
// local client if addr is one of: 'kvstore', | |||
// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client. | |||
// ClientFactory returns a client object, creating a local client when | |||
// addr is one of 'kvstore', 'persistent_kvstore', 'e2e', or 'noop', | |||
// and a remote client otherwise. | |||
// | |||
// The Closer is a noop except for persistent_kvstore applications, | |||
// which will clean up the store. | |||
func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) { | |||
func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient.Client, io.Closer, error) { | |||
switch addr { | |||
case "kvstore": | |||
return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} | |||
return abciclient.NewLocalClient(logger, kvstore.NewApplication()), noopCloser{}, nil | |||
case "persistent_kvstore": | |||
app := kvstore.NewPersistentKVStoreApplication(logger, dbDir) | |||
return abciclient.NewLocalCreator(app), app | |||
return abciclient.NewLocalClient(logger, app), app, nil | |||
case "e2e": | |||
app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) | |||
if err != nil { | |||
panic(err) | |||
return nil, noopCloser{}, err | |||
} | |||
return abciclient.NewLocalCreator(app), noopCloser{} | |||
return abciclient.NewLocalClient(logger, app), noopCloser{}, nil | |||
case "noop": | |||
return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} | |||
return abciclient.NewLocalClient(logger, types.NewBaseApplication()), noopCloser{}, nil | |||
default: | |||
mustConnect := false // loop retrying | |||
return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{} | |||
const mustConnect = false // loop retrying | |||
client, err := abciclient.NewClient(logger, addr, transport, mustConnect) | |||
if err != nil { | |||
return nil, noopCloser{}, err | |||
} | |||
return client, noopCloser{}, nil | |||
} | |||
} | |||
type noopCloser struct{} | |||
func (noopCloser) Close() error { return nil } | |||
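// A minimal caller sketch for ClientFactory, for illustration only: the | |||
// "kvstore" address, "socket" transport, dbDir path, and the helper's name | |||
// are placeholders, not values taken from this change. | |||
func exampleClientFactory() { | |||
	logger := log.NewNopLogger() | |||
	client, closer, err := ClientFactory(logger, "kvstore", "socket", "/tmp/app-db") | |||
	if err != nil { | |||
		logger.Error("failed to construct ABCI client", "err", err) | |||
		return | |||
	} | |||
	defer closer.Close() // a noop unless addr was "persistent_kvstore" | |||
	_ = client           // typically handed to New (see below) | |||
} | |||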
// proxyClient provides the application connection. | |||
type proxyClient struct { | |||
service.BaseService | |||
logger log.Logger | |||
client abciclient.Client | |||
metrics *Metrics | |||
} | |||
// New creates the proxy application client: a metrics-recording wrapper that also manages the underlying client's lifecycle. | |||
func New(client abciclient.Client, logger log.Logger, metrics *Metrics) abciclient.Client { | |||
conn := &proxyClient{ | |||
logger: logger, | |||
metrics: metrics, | |||
client: client, | |||
} | |||
conn.BaseService = *service.NewBaseService(logger, "proxyClient", conn) | |||
return conn | |||
} | |||
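// Sketch of the intended wiring, for illustration: NopMetrics is assumed | |||
// to be the no-op constructor paired with Metrics (as elsewhere in | |||
// Tendermint); a real node would pass Prometheus-backed metrics instead. | |||
func exampleWiring(ctx context.Context, client abciclient.Client, logger log.Logger) error { | |||
	proxyApp := New(client, logger, NopMetrics()) | |||
	if err := proxyApp.Start(ctx); err != nil { // OnStart installs the crash watchdog below | |||
		return err | |||
	} | |||
	// proxyApp now serves consensus, mempool, query, and snapshot calls. | |||
	return nil | |||
} | |||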
func (app *proxyClient) OnStop() { tryCallStop(app.client) } | |||
func (app *proxyClient) Error() error { return app.client.Error() } | |||
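// tryCallStop stops the wrapped client when its concrete type still | |||
// exposes a synchronous Stop method; purely context-managed clients are | |||
// left to shut down with their context. | |||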
func tryCallStop(client abciclient.Client) { | |||
if c, ok := client.(interface{ Stop() }); ok { | |||
c.Stop() | |||
} | |||
} | |||
func (app *proxyClient) OnStart(ctx context.Context) error { | |||
var err error | |||
defer func() { | |||
if err != nil { | |||
tryCallStop(app.client) | |||
} | |||
}() | |||
// Kill Tendermint if the ABCI application crashes. | |||
go func() { | |||
if !app.client.IsRunning() { | |||
return | |||
} | |||
app.client.Wait() | |||
if ctx.Err() != nil { | |||
return | |||
} | |||
if err := app.client.Error(); err != nil { | |||
app.logger.Error("client connection terminated. Did the application crash? Please restart tendermint", | |||
"err", err) | |||
if killErr := kill(); killErr != nil { | |||
app.logger.Error("Failed to kill this process - please do so manually", | |||
"err", killErr) | |||
} | |||
} | |||
}() | |||
return app.client.Start(ctx) | |||
} | |||
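// kill signals this process with SIGABRT rather than SIGTERM so that, if | |||
// the signal is not handled, the Go runtime crashes with a full goroutine | |||
// dump, which helps diagnose why the ABCI client died. | |||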
func kill() error { | |||
p, err := os.FindProcess(os.Getpid()) | |||
if err != nil { | |||
return err | |||
} | |||
return p.Signal(syscall.SIGABRT) | |||
} | |||
func (app *proxyClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() | |||
return app.client.InitChain(ctx, req) | |||
} | |||
func (app *proxyClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() | |||
return app.client.PrepareProposal(ctx, req) | |||
} | |||
func (app *proxyClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() | |||
return app.client.ProcessProposal(ctx, req) | |||
} | |||
func (app *proxyClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() | |||
return app.client.ExtendVote(ctx, req) | |||
} | |||
func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() | |||
return app.client.VerifyVoteExtension(ctx, req) | |||
} | |||
func (app *proxyClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() | |||
return app.client.FinalizeBlock(ctx, req) | |||
} | |||
func (app *proxyClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() | |||
return app.client.Commit(ctx) | |||
} | |||
func (app *proxyClient) Flush(ctx context.Context) error { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() | |||
return app.client.Flush(ctx) | |||
} | |||
func (app *proxyClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() | |||
return app.client.CheckTx(ctx, req) | |||
} | |||
func (app *proxyClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() | |||
return app.client.Echo(ctx, msg) | |||
} | |||
func (app *proxyClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() | |||
return app.client.Info(ctx, req) | |||
} | |||
func (app *proxyClient) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() | |||
return app.client.Query(ctx, reqQuery) | |||
} | |||
func (app *proxyClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() | |||
return app.client.ListSnapshots(ctx, req) | |||
} | |||
func (app *proxyClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() | |||
return app.client.OfferSnapshot(ctx, req) | |||
} | |||
func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() | |||
return app.client.LoadSnapshotChunk(ctx, req) | |||
} | |||
func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { | |||
defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() | |||
return app.client.ApplySnapshotChunk(ctx, req) | |||
} | |||
// addTimeSample returns a function that, when called, adds an observation to m. | |||
// The observation added to m is the number of seconds elapsed since addTimeSample | |||
// was initially called. addTimeSample is meant to be called in a defer to calculate | |||
// the amount of time a function takes to complete. | |||
func addTimeSample(m metrics.Histogram) func() { | |||
start := time.Now() | |||
return func() { m.Observe(time.Since(start).Seconds()) } | |||
} |
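// To isolate the defer-timing idiom used above: addTimeSample runs | |||
// immediately (starting the clock) and the closure it returns runs at | |||
// function exit (recording the observation) - hence the trailing () on | |||
// the deferred call. A self-contained sketch; the discard backend | |||
// (github.com/go-kit/kit/metrics/discard) is used only so the example | |||
// compiles without a Prometheus dependency, and exampleTiming is a | |||
// hypothetical name. | |||
func exampleTiming() { | |||
	h := discard.NewHistogram()       // swap in a real histogram in production | |||
	defer addTimeSample(h)()          // note the (): the timer starts now | |||
	time.Sleep(25 * time.Millisecond) // stand-in for real work | |||
} | |||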
@ -1,131 +0,0 @@ | |||
package proxy | |||
import ( | |||
"context" | |||
"os" | |||
"syscall" | |||
abciclient "github.com/tendermint/tendermint/abci/client" | |||
"github.com/tendermint/tendermint/libs/log" | |||
"github.com/tendermint/tendermint/libs/service" | |||
) | |||
// AppConns is Tendermint's interface to the application; it consists of | |||
// multiple connections. | |||
type AppConns interface { | |||
service.Service | |||
// Mempool connection | |||
Mempool() AppConnMempool | |||
// Consensus connection | |||
Consensus() AppConnConsensus | |||
// Query connection | |||
Query() AppConnQuery | |||
// Snapshot connection | |||
Snapshot() AppConnSnapshot | |||
} | |||
// NewAppConns calls NewMultiAppConn. | |||
func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { | |||
return NewMultiAppConn(clientCreator, logger, metrics) | |||
} | |||
// multiAppConn implements AppConns. | |||
// | |||
// A multiAppConn is made of a few appConns and manages their underlying abci | |||
// clients. | |||
// TODO: on app restart, clients must reboot together | |||
type multiAppConn struct { | |||
service.BaseService | |||
logger log.Logger | |||
metrics *Metrics | |||
consensusConn AppConnConsensus | |||
mempoolConn AppConnMempool | |||
queryConn AppConnQuery | |||
snapshotConn AppConnSnapshot | |||
client stoppableClient | |||
clientCreator abciclient.Creator | |||
} | |||
// TODO: this is a totally internal and quasi-permanent shim for | |||
// clients. Eventually we can have a single client and some kind of | |||
// reasonable lifecycle without needing an explicit stop method. | |||
type stoppableClient interface { | |||
abciclient.Client | |||
Stop() | |||
} | |||
// NewMultiAppConn makes all necessary abci connections to the application. | |||
func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { | |||
multiAppConn := &multiAppConn{ | |||
logger: logger, | |||
metrics: metrics, | |||
clientCreator: clientCreator, | |||
} | |||
multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn) | |||
return multiAppConn | |||
} | |||
func (app *multiAppConn) Mempool() AppConnMempool { return app.mempoolConn } | |||
func (app *multiAppConn) Consensus() AppConnConsensus { return app.consensusConn } | |||
func (app *multiAppConn) Query() AppConnQuery { return app.queryConn } | |||
func (app *multiAppConn) Snapshot() AppConnSnapshot { return app.snapshotConn } | |||
func (app *multiAppConn) OnStart(ctx context.Context) error { | |||
var err error | |||
defer func() { | |||
if err != nil { | |||
app.client.Stop() | |||
} | |||
}() | |||
var client abciclient.Client | |||
client, err = app.clientCreator(app.logger) | |||
if err != nil { | |||
return err | |||
} | |||
app.queryConn = NewAppConnQuery(client, app.metrics) | |||
app.snapshotConn = NewAppConnSnapshot(client, app.metrics) | |||
app.mempoolConn = NewAppConnMempool(client, app.metrics) | |||
app.consensusConn = NewAppConnConsensus(client, app.metrics) | |||
app.client = client.(stoppableClient) | |||
// Kill Tendermint if the ABCI application crashes. | |||
go func() { | |||
if !client.IsRunning() { | |||
return | |||
} | |||
app.client.Wait() | |||
if ctx.Err() != nil { | |||
return | |||
} | |||
if err := app.client.Error(); err != nil { | |||
app.logger.Error("client connection terminated. Did the application crash? Please restart tendermint", | |||
"err", err) | |||
if killErr := kill(); killErr != nil { | |||
app.logger.Error("Failed to kill this process - please do so manually", | |||
"err", killErr) | |||
} | |||
} | |||
}() | |||
return client.Start(ctx) | |||
} | |||
func (app *multiAppConn) OnStop() { app.client.Stop() } | |||
func kill() error { | |||
p, err := os.FindProcess(os.Getpid()) | |||
if err != nil { | |||
return err | |||
} | |||
return p.Signal(syscall.SIGTERM) | |||
} |