Browse Source

spec: merge spec repo into tendermint repo (#7804)

pull/7804/head
Callum Waters 2 years ago
parent
commit
e81b0e290e
235 changed files with 3703 additions and 4485 deletions
  1. +0
    -17
      .github/workflows/action.yml
  2. +36
    -0
      .github/workflows/e2e-manual.yml
  3. +0
    -17
      .github/workflows/e2e-nightly-34x.yml
  4. +0
    -1
      .github/workflows/e2e-nightly-35x.yml
  5. +0
    -1
      .github/workflows/e2e-nightly-master.yml
  6. +1
    -1
      .github/workflows/lint.yml
  7. +1
    -1
      .github/workflows/linter.yml
  8. +18
    -0
      .github/workflows/markdown-links.yml
  9. +1
    -8
      .gitignore
  10. +2
    -2
      .markdownlint.yml
  11. +1
    -1
      CHANGELOG_PENDING.md
  12. +1
    -1
      Makefile
  13. +1
    -4
      abci/client/client.go
  14. +8
    -40
      abci/client/grpc_client.go
  15. +11
    -47
      abci/client/local_client.go
  16. +23
    -92
      abci/client/mocks/client.go
  17. +13
    -46
      abci/client/socket_client.go
  18. +3
    -3
      abci/client/socket_client_test.go
  19. +41
    -18
      abci/cmd/abci-cli/abci-cli.go
  20. +27
    -65
      abci/example/example_test.go
  21. +12
    -5
      abci/example/kvstore/kvstore.go
  22. +26
    -33
      abci/example/kvstore/kvstore_test.go
  23. +24
    -22
      abci/example/kvstore/persistent_kvstore.go
  24. +3
    -9
      abci/server/socket_server.go
  25. +17
    -15
      abci/tests/server/client.go
  26. +18
    -33
      abci/types/application.go
  27. +13
    -35
      abci/types/messages.go
  28. +1516
    -2198
      abci/types/types.pb.go
  29. +46
    -0
      cmd/tendermint/commands/completion.go
  30. +1
    -0
      cmd/tendermint/commands/key_migrate.go
  31. +4
    -5
      cmd/tendermint/commands/reindex_event.go
  32. +6
    -5
      cmd/tendermint/commands/reindex_event_test.go
  33. +1
    -0
      cmd/tendermint/commands/replay.go
  34. +10
    -6
      cmd/tendermint/commands/rollback_test.go
  35. +27
    -6
      cmd/tendermint/commands/root_test.go
  36. +1
    -1
      cmd/tendermint/commands/run_node.go
  37. +1
    -0
      cmd/tendermint/commands/show_node_id.go
  38. +2
    -2
      cmd/tendermint/main.go
  39. +4
    -4
      config/toml.go
  40. +2
    -4
      config/toml_test.go
  41. +1
    -0
      crypto/ed25519/bench_test.go
  42. +66
    -1
      crypto/secp256k1/secp256k1.go
  43. +0
    -76
      crypto/secp256k1/secp256k1_nocgo.go
  44. +1
    -0
      crypto/sr25519/bench_test.go
  45. +3
    -3
      docs/architecture/adr-071-proposer-based-timestamps.md
  46. +3
    -2
      docs/architecture/adr-077-block-retention.md
  47. +2
    -1
      docs/architecture/adr-078-nonzero-genesis.md
  48. +3
    -2
      docs/architecture/adr-079-ed25519-verification.md
  49. +2
    -1
      docs/architecture/adr-080-reverse-sync.md
  50. +0
    -0
      docs/architecture/img/block-retention.png
  51. +1
    -1
      docs/pre.sh
  52. +4
    -1
      docs/rfc/README.md
  53. +0
    -0
      docs/rfc/images/abci++.png
  54. +0
    -0
      docs/rfc/images/abci.png
  55. +257
    -0
      docs/rfc/rfc-011-abci++.md
  56. +162
    -0
      docs/rfc/rfc-011-delete-gas.md
  57. +98
    -0
      docs/rfc/rfc-012-semantic-versioning.md
  58. +3
    -2
      docs/rfc/rfc-013-abci++.md
  59. +2
    -1
      docs/rfc/rfc-014-semantic-versioning.md
  60. +2
    -2
      docs/roadmap/roadmap.md
  61. +1
    -1
      docs/tendermint-core/block-structure.md
  62. +1
    -1
      docs/tendermint-core/consensus/README.md
  63. +1
    -1
      docs/tendermint-core/subscription.md
  64. +1
    -2
      go.mod
  65. +2
    -4
      go.sum
  66. +6
    -8
      internal/blocksync/pool_test.go
  67. +4
    -4
      internal/blocksync/reactor_test.go
  68. +0
    -3
      internal/consensus/README.md
  69. +4
    -2
      internal/consensus/byzantine_test.go
  70. +13
    -14
      internal/consensus/common_test.go
  71. +1
    -0
      internal/consensus/invalid_test.go
  72. +19
    -13
      internal/consensus/mempool_test.go
  73. +1
    -0
      internal/consensus/metrics.go
  74. +1
    -0
      internal/consensus/mocks/cons_sync_reactor.go
  75. +1
    -0
      internal/consensus/mocks/fast_sync_reactor.go
  76. +1
    -1
      internal/consensus/reactor_test.go
  77. +3
    -8
      internal/consensus/replay_stubs.go
  78. +24
    -21
      internal/consensus/replay_test.go
  79. +3
    -1
      internal/consensus/state.go
  80. +10
    -18
      internal/consensus/types/height_vote_set_test.go
  81. +1
    -0
      internal/consensus/types/peer_round_state_test.go
  82. +1
    -1
      internal/consensus/wal_generator.go
  83. +2
    -2
      internal/eventbus/event_bus.go
  84. +17
    -25
      internal/eventbus/event_bus_test.go
  85. +1
    -1
      internal/evidence/doc.go
  86. +1
    -0
      internal/evidence/mocks/block_store.go
  87. +8
    -7
      internal/inspect/inspect_test.go
  88. +3
    -9
      internal/libs/autofile/autofile_test.go
  89. +2
    -5
      internal/libs/autofile/group_test.go
  90. +0
    -31
      internal/libs/sync/closer.go
  91. +0
    -28
      internal/libs/sync/closer_test.go
  92. +1
    -1
      internal/libs/tempfile/tempfile_test.go
  93. +1
    -0
      internal/mempool/ids_test.go
  94. +9
    -2
      internal/mempool/mempool_bench_test.go
  95. +1
    -1
      internal/mempool/mempool_test.go
  96. +2
    -1
      internal/mempool/reactor_test.go
  97. +1
    -0
      internal/mempool/tx_test.go
  98. +1
    -0
      internal/p2p/channel.go
  99. +1
    -0
      internal/p2p/metrics_test.go
  100. +1
    -0
      internal/p2p/p2ptest/util.go

+ 0
- 17
.github/workflows/action.yml View File

@ -1,17 +0,0 @@
name: Check Markdown links
on:
push:
branches:
- master
pull_request:
branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
check-modified-files-only: 'yes'

+ 36
- 0
.github/workflows/e2e-manual.yml View File

@ -0,0 +1,36 @@
# Runs randomly generated E2E testnets nightly on master
# manually run e2e tests
name: e2e-manual
on:
workflow_dispatch:
jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/
- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

+ 0
- 17
.github/workflows/e2e-nightly-34x.yml View File

@ -6,7 +6,6 @@
name: e2e-nightly-34x
on:
workflow_dispatch: # allow running workflow manually, in theory
schedule:
- cron: '0 2 * * *'
@ -58,19 +57,3 @@ jobs:
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
SLACK_FOOTER: ''

+ 0
- 1
.github/workflows/e2e-nightly-35x.yml View File

@ -5,7 +5,6 @@
name: e2e-nightly-35x
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'


+ 0
- 1
.github/workflows/e2e-nightly-master.yml View File

@ -5,7 +5,6 @@
name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'


+ 1
- 1
.github/workflows/lint.yml View File

@ -1,4 +1,4 @@
name: Lint
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.


+ 1
- 1
.github/workflows/linter.yml View File

@ -1,4 +1,4 @@
name: Lint
name: Markdown Linter
on:
push:
branches:


+ 18
- 0
.github/workflows/markdown-links.yml View File

@ -0,0 +1,18 @@
# Currently disabled until all links have been fixed
# name: Check Markdown links
# on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
# jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@master
# - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
# with:
# check-modified-files-only: 'yes'

+ 1
- 8
.gitignore View File

@ -47,13 +47,7 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
proto/tendermint/blocksync/types.proto
proto/tendermint/consensus/types.proto
proto/tendermint/mempool/*.proto
proto/tendermint/p2p/*.proto
proto/tendermint/statesync/*.proto
proto/tendermint/types/*.proto
proto/tendermint/version/*.proto
proto/spec/**/*.pb.go
*.aux
*.bbl
*.blg
@ -61,4 +55,3 @@ proto/tendermint/version/*.proto
*.pdf
*.gz
*.dvi
*.pb.go

+ 2
- 2
.markdownlint.yml View File

@ -1,8 +1,8 @@
default: true
MD001: false
MD007: { indent: 4 }
MD007: {indent: 4}
MD013: false
MD024: { siblings_only: true }
MD024: {siblings_only: true}
MD025: false
MD033: false
MD036: false


+ 1
- 1
CHANGELOG_PENDING.md View File

@ -20,7 +20,7 @@ Special thanks to external contributors on this release:
- Apps
- [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository.
- [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
- P2P Protocol


+ 1
- 1
Makefile View File

@ -78,7 +78,7 @@ $(BUILDDIR)/:
# there and run the Build & Push Proto Builder Image workflow.
IMAGE := ghcr.io/tendermint/docker-build-proto:latest
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
HTTPS_GIT := https://github.com/tendermint/spec.git
HTTPS_GIT := https://github.com/tendermint/tendermint.git
###############################################################################
### Protobuf ###


+ 1
- 4
abci/client/client.go View File

@ -33,14 +33,12 @@ type Client interface {
// Asynchronous requests
FlushAsync(context.Context) (*ReqRes, error)
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
// Synchronous requests
Flush(context.Context) error
Echo(ctx context.Context, msg string) (*types.ResponseEcho, error)
Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
Commit(context.Context) (*types.ResponseCommit, error)
@ -49,8 +47,7 @@ type Client interface {
ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)


+ 8
- 40
abci/client/grpc_client.go View File

@ -193,16 +193,6 @@ func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
req := types.ToRequestDeliverTx(params)
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
req := types.ToRequestCheckTx(params)
@ -271,18 +261,6 @@ func (cli *grpcClient) Info(
return cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
}
func (cli *grpcClient) DeliverTx(
ctx context.Context,
params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.DeliverTxAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTx(
ctx context.Context,
params types.RequestCheckTx,
@ -317,24 +295,6 @@ func (cli *grpcClient) InitChain(
return cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
}
func (cli *grpcClient) BeginBlock(
ctx context.Context,
params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
req := types.ToRequestBeginBlock(params)
return cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
}
func (cli *grpcClient) EndBlock(
ctx context.Context,
params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
req := types.ToRequestEndBlock(params)
return cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ListSnapshots(
ctx context.Context,
params types.RequestListSnapshots,
@ -400,3 +360,11 @@ func (cli *grpcClient) VerifyVoteExtension(
req := types.ToRequestVerifyVoteExtension(params)
return cli.client.VerifyVoteExtension(ctx, req.GetVerifyVoteExtension(), grpc.WaitForReady(true))
}
func (cli *grpcClient) FinalizeBlock(
ctx context.Context,
params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
req := types.ToRequestFinalizeBlock(params)
return cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
}

+ 11
- 47
abci/client/local_client.go View File

@ -58,17 +58,6 @@ func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return newLocalReqRes(types.ToRequestFlush(), nil), nil
}
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(params)
return app.callback(
types.ToRequestDeliverTx(params),
types.ToResponseDeliverTx(res),
), nil
}
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@ -98,18 +87,6 @@ func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types
return &res, nil
}
func (app *localClient) DeliverTx(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(req)
return &res, nil
}
func (app *localClient) CheckTx(
ctx context.Context,
req types.RequestCheckTx,
@ -152,30 +129,6 @@ func (app *localClient) InitChain(
return &res, nil
}
func (app *localClient) BeginBlock(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return &res, nil
}
func (app *localClient) EndBlock(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return &res, nil
}
func (app *localClient) ListSnapshots(
ctx context.Context,
req types.RequestListSnapshots,
@ -266,6 +219,17 @@ func (app *localClient) VerifyVoteExtension(
return &res, nil
}
func (app *localClient) FinalizeBlock(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {


+ 23
- 92
abci/client/mocks/client.go View File

@ -40,29 +40,6 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply
return r0, r1
}
// BeginBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseBeginBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0, _a1)
@ -132,52 +109,6 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
return r0, r1
}
// DeliverTx provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Echo provides a mock function with given fields: ctx, msg
func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
ret := _m.Called(ctx, msg)
@ -201,29 +132,6 @@ func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, er
return r0, r1
}
// EndBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseEndBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEndBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Error provides a mock function with given fields:
func (_m *Client) Error() error {
ret := _m.Called()
@ -261,6 +169,29 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (
return r0, r1
}
// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Flush provides a mock function with given fields: _a0
func (_m *Client) Flush(_a0 context.Context) error {
ret := _m.Called(_a0)


+ 13
- 46
abci/client/socket_client.go View File

@ -226,10 +226,6 @@ func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
@ -280,18 +276,6 @@ func (cli *socketClient) Info(
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) DeliverTx(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestDeliverTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
}
func (cli *socketClient) CheckTx(
ctx context.Context,
req types.RequestCheckTx,
@ -334,30 +318,6 @@ func (cli *socketClient) InitChain(
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) BeginBlock(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestBeginBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
}
func (cli *socketClient) EndBlock(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEndBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
}
func (cli *socketClient) ListSnapshots(
ctx context.Context,
req types.RequestListSnapshots,
@ -449,6 +409,17 @@ func (cli *socketClient) VerifyVoteExtension(
return reqres.Response.GetVerifyVoteExtension(), nil
}
func (cli *socketClient) FinalizeBlock(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetFinalizeBlock(), nil
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it ether
@ -550,8 +521,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
@ -566,10 +535,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_ExtendVote)
case *types.Request_VerifyVoteExtension:
_, ok = res.Value.(*types.Response_VerifyVoteExtension)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
case *types.Request_ApplySnapshotChunk:
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
case *types.Request_LoadSnapshotChunk:
@ -578,6 +543,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_ListSnapshots)
case *types.Request_OfferSnapshot:
_, ok = res.Value.(*types.Response_OfferSnapshot)
case *types.Request_FinalizeBlock:
_, ok = res.Value.(*types.Response_FinalizeBlock)
}
return ok
}


+ 3
- 3
abci/client/socket_client_test.go View File

@ -29,7 +29,7 @@ func TestProperSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
rsp, err := c.BeginBlock(ctx, types.RequestBeginBlock{})
rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
assert.NoError(t, c.Flush(ctx))
assert.NotNil(t, rsp)
@ -79,7 +79,7 @@ type slowApp struct {
types.BaseApplication
}
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
return types.ResponseFinalizeBlock{}
}

+ 41
- 18
abci/cmd/abci-cli/abci-cli.go View File

@ -193,7 +193,7 @@ var deliverTxCmd = &cobra.Command{
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
RunE: cmdDeliverTx,
RunE: cmdFinalizeBlock,
}
var checkTxCmd = &cobra.Command{
@ -300,17 +300,38 @@ func cmdTest(cmd *cobra.Command, args []string) error {
[]func() error{
func() error { return servertest.InitChain(ctx, client) },
func() error { return servertest.Commit(ctx, client, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) },
func() error {
return servertest.FinalizeBlock(ctx, client, [][]byte{
[]byte("abc"),
}, []uint32{
code.CodeTypeBadNonce,
}, nil)
},
func() error { return servertest.Commit(ctx, client, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) },
func() error {
return servertest.FinalizeBlock(ctx, client, [][]byte{
{0x00},
}, []uint32{
code.CodeTypeOK,
}, nil)
},
func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
func() error {
return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
return servertest.FinalizeBlock(ctx, client, [][]byte{
{0x00},
{0x01},
{0x00, 0x02},
{0x00, 0x03},
{0x00, 0x00, 0x04},
{0x00, 0x00, 0x06},
}, []uint32{
code.CodeTypeBadNonce,
code.CodeTypeOK,
code.CodeTypeOK,
code.CodeTypeOK,
code.CodeTypeOK,
code.CodeTypeBadNonce,
}, nil)
},
func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
})
@ -406,7 +427,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
case "commit":
return cmdCommit(cmd, actualArgs)
case "deliver_tx":
return cmdDeliverTx(cmd, actualArgs)
return cmdFinalizeBlock(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
case "info":
@ -475,7 +496,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
const codeBad uint32 = 10
// Append a new tx to application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
@ -487,16 +508,18 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.DeliverTx(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
for _, tx := range res.Txs {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,
Info: tx.Info,
Log: tx.Log,
})
}
return nil
}


+ 27
- 65
abci/example/example_test.go View File

@ -6,7 +6,6 @@ import (
"math/rand"
"net"
"os"
"reflect"
"testing"
"time"
@ -35,7 +34,7 @@ func TestKVStore(t *testing.T) {
logger := log.NewTestingLogger(t)
logger.Info("### Testing KVStore")
testStream(ctx, t, logger, kvstore.NewApplication())
testBulk(ctx, t, logger, kvstore.NewApplication())
}
func TestBaseApp(t *testing.T) {
@ -44,7 +43,7 @@ func TestBaseApp(t *testing.T) {
logger := log.NewTestingLogger(t)
logger.Info("### Testing BaseApp")
testStream(ctx, t, logger, types.NewBaseApplication())
testBulk(ctx, t, logger, types.NewBaseApplication())
}
func TestGRPC(t *testing.T) {
@ -57,10 +56,10 @@ func TestGRPC(t *testing.T) {
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
}
func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
t.Helper()
const numDeliverTxs = 20000
const numDeliverTxs = 700000
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
@ -77,51 +76,22 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.
err = client.Start(ctx)
require.NoError(t, err)
done := make(chan struct{})
counter := 0
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
case *types.Response_Flush:
// ignore
default:
t.Error("Unexpected response type", reflect.TypeOf(res.Value))
}
})
// Write requests
// Construct request
rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
require.NoError(t, err)
// Sometimes send flush messages
if counter%128 == 0 {
err = client.Flush(ctx)
require.NoError(t, err)
}
rfb.Txs[counter] = []byte("test")
}
// Send bulk request
res, err := client.FinalizeBlock(ctx, rfb)
require.NoError(t, err)
require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
for _, tx := range res.Txs {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
// Send final flush message
_, err = client.FlushAsync(ctx)
err = client.Flush(ctx)
require.NoError(t, err)
<-done
}
//-------------------------
@ -133,7 +103,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
t.Helper()
numDeliverTxs := 2000
numDeliverTxs := 680000
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
@ -142,7 +112,7 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
require.NoError(t, server.Start(ctx))
t.Cleanup(func() { server.Wait() })
t.Cleanup(server.Wait)
// Connect to the socket
conn, err := grpc.Dial(socket,
@ -159,25 +129,17 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
client := types.NewABCIApplicationClient(conn)
// Write requests
// Construct request
rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(ctx, &types.RequestDeliverTx{Tx: []byte("test")})
require.NoError(t, err, "Error in GRPC DeliverTx")
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
rfb.Txs[counter] = []byte("test")
}
// Send request
response, err := client.FinalizeBlock(ctx, &rfb)
require.NoError(t, err, "Error in GRPC FinalizeBlock")
require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.Txs {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
}

+ 12
- 5
abci/example/kvstore/kvstore.go View File

@ -86,14 +86,13 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
}
// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx {
var key, value string
parts := bytes.Split(req.Tx, []byte("="))
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
} else {
key, value = string(req.Tx), string(req.Tx)
key, value = string(tx), string(tx)
}
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
@ -114,7 +113,15 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
txs := make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
txs[i] = app.HandleTx(tx)
}
return types.ResponseFinalizeBlock{Txs: txs}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {


+ 26
- 33
abci/example/kvstore/kvstore_test.go View File

@ -3,7 +3,6 @@ package kvstore
import (
"context"
"fmt"
"os"
"sort"
"testing"
@ -25,12 +24,14 @@ const (
)
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// repeating tx doesn't raise error
ar = app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
ar = app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// commit
app.Commit()
@ -72,10 +73,7 @@ func TestKVStoreKV(t *testing.T) {
}
func TestPersistentKVStoreKV(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
dir := t.TempDir()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
@ -90,10 +88,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
}
func TestPersistentKVStoreInfo(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
dir := t.TempDir()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
@ -111,8 +106,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@ -124,10 +118,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
dir := t.TempDir()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
@ -204,16 +195,16 @@ func makeApplyBlock(
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Height: height,
Txs: txs,
})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
}
@ -330,13 +321,15 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
}
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// repeating FinalizeBlock doesn't raise error
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// commit
_, err = app.Commit(ctx)
require.NoError(t, err)


+ 24
- 22
abci/example/kvstore/persistent_kvstore.go View File

@ -64,21 +64,21 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) HandleTx(tx []byte) *types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(req.Tx)
return app.execValidatorTx(tx)
}
if isPrepareTx(req.Tx) {
return app.execPrepareTx(req.Tx)
if isPrepareTx(tx) {
return app.execPrepareTx(tx)
}
// otherwise, update the key-value store
return app.app.DeliverTx(req)
return app.app.HandleTx(tx)
}
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
@ -121,7 +121,9 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
}
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// Execute transactions
// Update the validator set
func (app *PersistentKVStoreApplication) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
@ -143,12 +145,12 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
}
}
return types.ResponseBeginBlock{}
}
respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
respTxs[i] = app.HandleTx(tx)
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
@ -238,13 +240,13 @@ func isValidatorTx(tx []byte) bool {
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return types.ResponseDeliverTx{
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
@ -253,7 +255,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return types.ResponseDeliverTx{
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
@ -261,7 +263,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return types.ResponseDeliverTx{
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
@ -271,7 +273,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
@ -286,7 +288,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
}
if !hasKey {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
return types.ResponseDeliverTx{
return &types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
@ -298,7 +300,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return types.ResponseDeliverTx{
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
@ -311,7 +313,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
// -----------------------------
@ -324,9 +326,9 @@ func isPrepareTx(tx []byte) bool {
// execPrepareTx is noop. tx data is considered as placeholder
// and is substitute at the PrepareProposal.
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
// noop
return types.ResponseDeliverTx{}
return &types.ResponseDeliverTx{}
}
// substPrepareTx subst all the preparetx in the blockdata


+ 3
- 9
abci/server/socket_server.go View File

@ -213,9 +213,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
@ -228,12 +225,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
case *types.Request_ListSnapshots:
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
@ -258,6 +249,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_VerifyVoteExtension:
res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
responses <- types.ToResponseVerifyVoteExtension(res)
case *types.Request_FinalizeBlock:
res := s.app.FinalizeBlock(*r.FinalizeBlock)
responses <- types.ToResponseFinalizeBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}


+ 17
- 15
abci/tests/server/client.go View File

@ -49,22 +49,24 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
return nil
}
func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTx(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
for i, tx := range res.Txs {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp[i] {
fmt.Println("Failed test: FinalizeBlock")
fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("FinalizeBlock error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: FinalizeBlock")
fmt.Printf("FinalizeBlock response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("FinalizeBlock error")
}
}
fmt.Println("Passed test: DeliverTx")
fmt.Println("Passed test: FinalizeBlock")
return nil
}


+ 18
- 33
abci/types/application.go View File

@ -20,18 +20,14 @@ type Application interface {
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
ProcessProposal(RequestProcessProposal) ResponseProcessProposal
// Signals the beginning of a block
BeginBlock(RequestBeginBlock) ResponseBeginBlock
// Deliver a tx for full processing
DeliverTx(RequestDeliverTx) ResponseDeliverTx
// Signals the end of a block, returns changes to the validator set
EndBlock(RequestEndBlock) ResponseEndBlock
// Commit the state and return the application Merkle root hash
Commit() ResponseCommit
// Create application specific vote extension
ExtendVote(RequestExtendVote) ResponseExtendVote
// Verify application's vote extension data
VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
// Deliver the decided block with its txs to the Application
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
// State Sync Connection
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@ -56,10 +52,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
@ -86,14 +78,6 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
return ResponseListSnapshots{}
}
@ -118,6 +102,16 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce
return ResponseProcessProposal{}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
txs := make([]*ResponseDeliverTx, len(req.Txs))
for i := range req.Txs {
txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
}
return ResponseFinalizeBlock{
Txs: txs,
}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@ -142,11 +136,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
return &res, nil
@ -167,16 +156,6 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
func (app *GRPCApplication) ListSnapshots(
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
res := app.app.ListSnapshots(*req)
@ -224,3 +203,9 @@ func (app *GRPCApplication) ProcessProposal(
res := app.app.ProcessProposal(*req)
return &res, nil
}
func (app *GRPCApplication) FinalizeBlock(
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
res := app.app.FinalizeBlock(*req)
return &res, nil
}

+ 13
- 35
abci/types/messages.go View File

@ -4,6 +4,7 @@ import (
"io"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/libs/protoio"
)
@ -44,12 +45,6 @@ func ToRequestInfo(req RequestInfo) *Request {
}
}
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
}
}
func ToRequestCheckTx(req RequestCheckTx) *Request {
return &Request{
Value: &Request_CheckTx{&req},
@ -74,18 +69,6 @@ func ToRequestInitChain(req RequestInitChain) *Request {
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
return &Request{
Value: &Request_ListSnapshots{&req},
@ -134,6 +117,12 @@ func ToRequestProcessProposal(req RequestProcessProposal) *Request {
}
}
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
return &Request{
Value: &Request_FinalizeBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@ -159,11 +148,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
Value: &Response_Info{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
@ -189,18 +173,6 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
return &Response{
Value: &Response_ListSnapshots{&res},
@ -248,3 +220,9 @@ func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
Value: &Response_ProcessProposal{&res},
}
}
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
return &Response{
Value: &Response_FinalizeBlock{&res},
}
}

+ 1516
- 2198
abci/types/types.pb.go
File diff suppressed because it is too large
View File


+ 46
- 0
cmd/tendermint/commands/completion.go View File

@ -0,0 +1,46 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
)
// NewCompletionCmd returns a cobra.Command that generates bash and zsh
// completion scripts for the given root command. If hidden is true, the
// command will not show up in the root command's list of available commands.
func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command {
flagZsh := "zsh"
cmd := &cobra.Command{
Use: "completion",
Short: "Generate shell completion scripts",
Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT.
Once saved to file, a completion script can be loaded in the shell's
current session as shown:
$ . <(%s completion)
To configure your bash shell to load completions for each session add to
your $HOME/.bashrc or $HOME/.profile the following instruction:
. <(%s completion)
`, rootCmd.Use, rootCmd.Use),
RunE: func(cmd *cobra.Command, _ []string) error {
zsh, err := cmd.Flags().GetBool(flagZsh)
if err != nil {
return err
}
if zsh {
return rootCmd.GenZshCompletion(cmd.OutOrStdout())
}
return rootCmd.GenBashCompletion(cmd.OutOrStdout())
},
Hidden: hidden,
Args: cobra.NoArgs,
}
cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script")
return cmd
}

+ 1
- 0
cmd/tendermint/commands/key_migrate.go View File

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/scripts/keymigrate"


+ 4
- 5
cmd/tendermint/commands/reindex_event.go View File

@ -199,10 +199,9 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultFinalizeBlock: *r.FinalizeBlock,
}
var batch *indexer.Batch
@ -214,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.DeliverTxs[i]),
Result: *(r.FinalizeBlock.Txs[i]),
}
_ = batch.Add(&tr)


+ 6
- 5
cmd/tendermint/commands/reindex_event_test.go View File

@ -9,6 +9,8 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/state/indexer"
@ -16,7 +18,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
_ "github.com/lib/pq" // for the psql sink
)
@ -110,7 +111,7 @@ func TestLoadEventSink(t *testing.T) {
}
func TestLoadBlockStore(t *testing.T) {
testCfg, err := config.ResetTestRoot(t.Name())
testCfg, err := config.ResetTestRoot(t.TempDir(), t.Name())
require.NoError(t, err)
testCfg.DBBackend = "goleveldb"
_, _, err = loadStateAndBlockStore(testCfg)
@ -154,9 +155,9 @@ func TestReIndexEvent(t *testing.T) {
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{&dtx},
},
}
mockStateStore.


+ 1
- 0
cmd/tendermint/commands/replay.go View File

@ -2,6 +2,7 @@ package commands
import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/libs/log"


+ 10
- 6
cmd/tendermint/commands/rollback_test.go View File

@ -19,10 +19,12 @@ func TestRollbackIntegration(t *testing.T) {
dir := t.TempDir()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := rpctest.CreateConfig(t.Name())
cfg, err := rpctest.CreateConfig(t, t.Name())
require.NoError(t, err)
cfg.BaseConfig.DBBackend = "goleveldb"
app, err := e2e.NewApplication(e2e.DefaultConfig(dir))
require.NoError(t, err)
t.Run("First run", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
@ -30,27 +32,29 @@ func TestRollbackIntegration(t *testing.T) {
require.NoError(t, err)
node, _, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
require.NoError(t, err)
require.True(t, node.IsRunning())
time.Sleep(3 * time.Second)
cancel()
node.Wait()
require.False(t, node.IsRunning())
})
t.Run("Rollback", func(t *testing.T) {
time.Sleep(time.Second)
require.NoError(t, app.Rollback())
height, _, err = commands.RollbackState(cfg)
require.NoError(t, err)
require.NoError(t, err, "%d", height)
})
t.Run("Restart", func(t *testing.T) {
require.True(t, height > 0, "%d", height)
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
require.NoError(t, err2)
logger := log.NewTestingLogger(t)
logger := log.NewNopLogger()
client, err := local.New(logger, node2.(local.NodeService))
require.NoError(t, err)


+ 27
- 6
cmd/tendermint/commands/root_test.go View File

@ -1,6 +1,7 @@
package commands
import (
"context"
"fmt"
"os"
"path/filepath"
@ -17,6 +18,17 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)
// writeConfigVals writes a toml file with the given values.
// It returns an error if writing was impossible.
func writeConfigVals(dir string, vals map[string]string) error {
data := ""
for k, v := range vals {
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return os.WriteFile(cfile, []byte(data), 0600)
}
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(t *testing.T, dir string) *cfg.Config {
t.Helper()
@ -41,7 +53,7 @@ func testRootCmd(conf *cfg.Config) *cobra.Command {
return cmd
}
func testSetup(t *testing.T, conf *cfg.Config, args []string, env map[string]string) error {
func testSetup(ctx context.Context, t *testing.T, conf *cfg.Config, args []string, env map[string]string) error {
t.Helper()
cmd := testRootCmd(conf)
@ -49,7 +61,7 @@ func testSetup(t *testing.T, conf *cfg.Config, args []string, env map[string]str
// run with the args and env
args = append([]string{cmd.Use}, args...)
return cli.RunWithArgs(cmd, args, env)
return cli.RunWithArgs(ctx, cmd, args, env)
}
func TestRootHome(t *testing.T) {
@ -65,11 +77,14 @@ func TestRootHome(t *testing.T) {
{nil, map[string]string{"TMHOME": newRoot}, newRoot},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i, tc := range cases {
t.Run(fmt.Sprint(i), func(t *testing.T) {
conf := clearConfig(t, tc.root)
err := testSetup(t, conf, tc.args, tc.env)
err := testSetup(ctx, t, conf, tc.args, tc.env)
require.NoError(t, err)
require.Equal(t, tc.root, conf.RootDir)
@ -99,11 +114,14 @@ func TestRootFlagsEnv(t *testing.T) {
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i, tc := range cases {
t.Run(fmt.Sprint(i), func(t *testing.T) {
conf := clearConfig(t, defaultDir)
err := testSetup(t, conf, tc.args, tc.env)
err := testSetup(ctx, t, conf, tc.args, tc.env)
require.NoError(t, err)
assert.Equal(t, tc.logLevel, conf.LogLevel)
@ -113,6 +131,9 @@ func TestRootFlagsEnv(t *testing.T) {
}
func TestRootConfig(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// write non-default config
nonDefaultLogLvl := "debug"
cvals := map[string]string{
@ -142,14 +163,14 @@ func TestRootConfig(t *testing.T) {
// write the non-defaults to a different path
// TODO: support writing sub configs so we can test that too
err = WriteConfigVals(configFilePath, cvals)
err = writeConfigVals(configFilePath, cvals)
require.NoError(t, err)
cmd := testRootCmd(conf)
// run with the args and env
tc.args = append([]string{cmd.Use}, tc.args...)
err = cli.RunWithArgs(cmd, tc.args, tc.env)
err = cli.RunWithArgs(ctx, cmd, tc.args, tc.env)
require.NoError(t, err)
require.Equal(t, tc.logLvl, conf.LogLevel)


+ 1
- 1
cmd/tendermint/commands/run_node.go View File

@ -117,7 +117,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo
return fmt.Errorf("failed to start node: %w", err)
}
logger.Info("started node", "node", n.String())
logger.Info("started node", "chain", conf.ChainID())
<-ctx.Done()
return nil


+ 1
- 0
cmd/tendermint/commands/show_node_id.go View File

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/config"
)


+ 2
- 2
cmd/tendermint/main.go View File

@ -44,7 +44,7 @@ func main() {
commands.MakeRollbackStateCommand(conf),
commands.MakeKeyMigrateCommand(conf, logger),
debug.DebugCmd,
cli.NewCompletionCmd(rcmd, true),
commands.NewCompletionCmd(rcmd, true),
)
// NOTE:
@ -60,7 +60,7 @@ func main() {
// Create & start node
rcmd.AddCommand(commands.NewRunNodeCmd(nodeFunc, conf, logger))
if err := rcmd.ExecuteContext(ctx); err != nil {
if err := cli.RunWithTrace(ctx, rcmd); err != nil {
panic(err)
}
}

+ 4
- 4
config/toml.go View File

@ -504,13 +504,13 @@ namespace = "{{ .Instrumentation.Namespace }}"
/****** these are for test settings ***********/
func ResetTestRoot(testName string) (*Config, error) {
return ResetTestRootWithChainID(testName, "")
func ResetTestRoot(dir, testName string) (*Config, error) {
return ResetTestRootWithChainID(dir, testName, "")
}
func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) {
func ResetTestRootWithChainID(dir, testName string, chainID string) (*Config, error) {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
rootDir, err := os.MkdirTemp(dir, fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
return nil, err
}


+ 2
- 4
config/toml_test.go View File

@ -20,9 +20,7 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) {
func TestEnsureRoot(t *testing.T) {
// setup temp dir for test
tmpDir, err := os.MkdirTemp("", "config-test")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
// create root dir
EnsureRoot(tmpDir)
@ -42,7 +40,7 @@ func TestEnsureTestRoot(t *testing.T) {
testName := "ensureTestRoot"
// create root dir
cfg, err := ResetTestRoot(testName)
cfg, err := ResetTestRoot(t.TempDir(), testName)
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir


+ 1
- 0
crypto/ed25519/bench_test.go View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)


+ 66
- 1
crypto/secp256k1/secp256k1.go View File

@ -9,11 +9,12 @@ import (
"math/big"
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/jsontypes"
// necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint
"golang.org/x/crypto/ripemd160" //nolint:staticcheck
)
//-------------------------------------
@ -178,3 +179,67 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool {
func (pubKey PubKey) Type() string {
return KeyType
}
// secp256k1halfN is half the curve order N (N >> 1). Any ECDSA signature
// (R, S) has an equivalent "twin" (R, N-S); requiring S <= N/2 ("lower-S"
// form) pins down a unique encoding and is used to reject malleable signatures.
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
// The returned signature will be of the form R || S (in lower-S form).
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
	// Re-derive the btcec private key from the raw key bytes; the error-like
	// second return is the public key, which we don't need here.
	priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
	// btcec's Sign produces a lower-S (non-malleable) signature by construction.
	sig, err := priv.Sign(crypto.Sha256(msg))
	if err != nil {
		return nil, err
	}
	// Flatten to the fixed 64-byte R || S wire encoding expected by callers.
	sigBytes := serializeSig(sig)
	return sigBytes, nil
}
// VerifySignature verifies a signature of the form R || S.
// It rejects signatures which are not in lower-S form.
func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
	// The raw encoding is exactly 32 bytes of R followed by 32 bytes of S.
	if len(sigStr) != 64 {
		return false
	}
	pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
	if err != nil {
		return false
	}
	// parse the signature:
	signature := signatureFromBytes(sigStr)
	// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
	// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
	if signature.S.Cmp(secp256k1halfN) > 0 {
		return false
	}
	return signature.Verify(crypto.Sha256(msg), pub)
}
// signatureFromBytes reconstructs a btcec Signature from its raw 64-byte
// R || S encoding. The caller must guarantee len(sigStr) == 64.
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
	r := new(big.Int).SetBytes(sigStr[:32])
	s := new(big.Int).SetBytes(sigStr[32:64])
	return &secp256k1.Signature{R: r, S: s}
}
// serializeSig flattens sig into the fixed-width 64-byte R || S encoding,
// left-padding each component with zero bytes up to 32 bytes.
func serializeSig(sig *secp256k1.Signature) []byte {
	out := make([]byte, 64)
	r := sig.R.Bytes()
	s := sig.S.Bytes()
	// copy into the tail of each 32-byte half so short values are 0-padded
	// on the left.
	copy(out[32-len(r):32], r)
	copy(out[64-len(s):64], s)
	return out
}

+ 0
- 76
crypto/secp256k1/secp256k1_nocgo.go View File

@ -1,76 +0,0 @@
//go:build !libsecp256k1
// +build !libsecp256k1
package secp256k1
import (
"math/big"
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/tendermint/crypto"
)
// used to reject malleable signatures
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
// The returned signature will be of the form R || S (in lower-S form).
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
sig, err := priv.Sign(crypto.Sha256(msg))
if err != nil {
return nil, err
}
sigBytes := serializeSig(sig)
return sigBytes, nil
}
// VerifySignature verifies a signature of the form R || S.
// It rejects signatures which are not in lower-S form.
func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
if len(sigStr) != 64 {
return false
}
pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
if err != nil {
return false
}
// parse the signature:
signature := signatureFromBytes(sigStr)
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
if signature.S.Cmp(secp256k1halfN) > 0 {
return false
}
return signature.Verify(crypto.Sha256(msg), pub)
}
// Read Signature struct from R || S. Caller needs to ensure
// that len(sigStr) == 64.
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
return &secp256k1.Signature{
R: new(big.Int).SetBytes(sigStr[:32]),
S: new(big.Int).SetBytes(sigStr[32:64]),
}
}
// Serialize signature to R || S.
// R, S are padded to 32 bytes respectively.
func serializeSig(sig *secp256k1.Signature) []byte {
rBytes := sig.R.Bytes()
sBytes := sig.S.Bytes()
sigBytes := make([]byte, 64)
// 0 pad the byte arrays from the left if they aren't big enough.
copy(sigBytes[32-len(rBytes):32], rBytes)
copy(sigBytes[64-len(sBytes):64], sBytes)
return sigBytes
}

+ 1
- 0
crypto/sr25519/bench_test.go View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)


+ 3
- 3
docs/architecture/adr-071-proposer-based-timestamps.md View File

@ -61,7 +61,7 @@ The following protocols and application features require a reliable source of ti
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements).
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)
Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).
@ -116,7 +116,7 @@ This timestamp is therefore no longer useful as part of consensus and may option
type Vote struct {
Type tmproto.SignedMsgType `json:"type"`
Height int64 `json:"height"`
Round int32 `json:"round"`
Round int32 `json:"round"`
BlockID BlockID `json:"block_id"` // zero if vote is nil.
-- Timestamp time.Time `json:"timestamp"`
ValidatorAddress Address `json:"validator_address"`
@ -135,7 +135,7 @@ A validator will only Prevote a proposal if the proposal timestamp is considered
A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator.
More specifically, a proposal timestamp is `timely` if `proposalTimestamp - PRECISION ≤ validatorLocalTime ≤ proposalTimestamp + PRECISION + MSGDELAY`.
Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/spec/blob/master/proto/tendermint/types/params.proto#L11) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
The consensus parameters will be updated to include this `Synchrony` field as follows:


rfc/001-block-retention.md → docs/architecture/adr-077-block-retention.md View File


rfc/002-nonzero-genesis.md → docs/architecture/adr-078-nonzero-genesis.md View File


rfc/003-ed25519-verification.md → docs/architecture/adr-079-ed25519-verification.md View File


rfc/005-reverse-sync.md → docs/architecture/adr-080-reverse-sync.md View File


rfc/images/block-retention.png → docs/architecture/img/block-retention.png View File


+ 1
- 1
docs/pre.sh View File

@ -1,4 +1,4 @@
#!/bin/bash
cp -a ../rpc/openapi/ .vuepress/public/rpc/
git clone https://github.com/tendermint/spec.git specRepo && cp -r specRepo/spec . && rm -rf specRepo
cp -r ../spec .

+ 4
- 1
docs/rfc/README.md View File

@ -41,12 +41,15 @@ sections.
- [RFC-001: Storage Engines](./rfc-001-storage-engine.rst)
- [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md)
- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.rst)
- [RFC-005: Event System](./rfc-005-event-system.rst)
- [RFC-006: Event Subscription](./rfc-006-event-subscription.md)
- [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md)
- [RFC-008: Don't Panic](./rfc-008-don't-panic.md)
- [RFC-009: Consensus Parameter Upgrades](./rfc-009-consensus-parameter-upgrades.md)
- [RFC-010: P2P Light Client](./rfc-010-p2p-light-client.rst)
- [RFC-011: Delete Gas](./rfc-011-delete-gas.md)
- [RFC-013: ABCI++](./rfc-013-abci++.md)
- [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md)
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

rfc/images/abci++.png → docs/rfc/images/abci++.png View File


rfc/images/abci.png → docs/rfc/images/abci.png View File


+ 257
- 0
docs/rfc/rfc-011-abci++.md View File

@ -0,0 +1,257 @@
# RFC 013: ABCI++
## Changelog
- 2020-01-11: initialized
- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254))
## Author(s)
- Dev (@valardragon)
- Sunny (@sunnya97)
## Context
ABCI is the interface between the consensus engine and the application.
It defines when the application can talk to consensus during the execution of a blockchain.
At the moment, the application can only act at one phase in consensus, immediately after a block has been finalized.
This restriction on the application prohibits numerous features for the application, including many scalability improvements that are now better understood than when ABCI was first written.
For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to".
This includes optimizations such as tx-level signature aggregation, state transition proofs, etc.
Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot enforce validators do more than just finalize txs.
This includes features such as threshold cryptography, and guaranteed IBC connection attempts.
We propose introducing three new phases to ABCI to enable these new features, and renaming the existing methods for block execution.
#### Prepare Proposal phase
This phase aims to allow the block proposer to perform more computation, to reduce load on all other full nodes, and light clients in the network.
It is intended to enable features such as batch optimizations on the transaction data (e.g. signature aggregation, zk rollup style validity proofs, etc.), enabling stateless blockchains with validator provided authentication paths, etc.
This new phase will only be executed by the block proposer. The application will take in the block header and raw transaction data output by the consensus engine's mempool. It will then return block data that is prepared for gossip on the network, and additional fields to include into the block header.
#### Process Proposal Phase
This phase aims to allow applications to determine validity of a new block proposal, and execute computation on the block data, prior to the blocks finalization.
It is intended to enable applications to reject block proposals with invalid data, and to enable alternate pipelined execution models. (Such as Ethereum-style immediate execution)
This phase will be executed by all full nodes upon receiving a block, though on the application side it can do more work in the event that the current node is a validator.
#### Vote Extension Phase
This phase aims to allow applications to require their validators do more than just validate blocks.
Example usecases of this include validator determined price oracles, validator guaranteed IBC connection attempts, and validator based threshold crypto.
This adds an app-determined data field that every validator must include with their vote, and these will thus appear in the header.
#### Rename {BeginBlock, [DeliverTx], EndBlock} to FinalizeBlock
The prior phases give the application more flexibility in its execution model for a block, and they obsolete the current methods for how the consensus engine relates the block data to the state machine. Thus we refactor the existing methods to better reflect what is happening in the new ABCI model.
This rename doesn't on its own enable anything new, but instead improves naming to clarify the expectations from the application in this new communication model. The existing ABCI methods `BeginBlock, [DeliverTx], EndBlock` are renamed to a single method called `FinalizeBlock`.
#### Summary
We include a more detailed list of features / scaling improvements that are blocked, and which new phases resolve them at the end of this document.
<img src="images/abci.png" style="float: left; width: 40%;" /> <img src="images/abci++.png" style="float: right; width: 40%;" />
On the top is the existing definition of ABCI, and on the bottom is the proposed ABCI++.
## Proposal
Below we suggest an API to add these three new phases.
In this document, sometimes the final round of voting is referred to as precommit for clarity in how it acts in the Tendermint case.
### Prepare Proposal
*Note, APIs in this section will change after Vote Extensions, we list the adjusted APIs further in the proposal.*
The Prepare Proposal phase allows the block proposer to perform application-dependent work in a block, to lower the amount of work the rest of the network must do. This enables batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. This phase introduces the following ABCI method
```rust
fn PrepareProposal(Block) -> BlockData
```
where `BlockData` is a type alias for however data is internally stored within the consensus engine. In Tendermint Core today, this is `[]Tx`.
The application may read the entire block proposal, and mutate the block data fields. Mutated transactions will still get removed from the mempool later on, as the mempool rechecks all transactions after a block is executed.
The `PrepareProposal` API will be modified in the vote extensions section, for allowing the application to modify the header.
### Process Proposal
The Process Proposal phase sends the block data to the state machine, prior to running the last round of votes on the state machine. This enables features such as allowing validators to reject a block according to whether state machine deems it valid, and changing block execution pipeline.
We introduce three new methods,
```rust
fn VerifyHeader(header: Header, isValidator: bool) -> ResponseVerifyHeader {...}
fn ProcessProposal(block: Block) -> ResponseProcessProposal {...}
fn RevertProposal(height: usize, round: usize) {...}
```
where
```rust
struct ResponseVerifyHeader {
accept_header: bool,
evidence: Vec<Evidence>
}
struct ResponseProcessProposal {
accept_block: bool,
evidence: Vec<Evidence>
}
```
Upon receiving a block header, every validator runs `VerifyHeader(header, isValidator)`. The reason for why `VerifyHeader` is split from `ProcessProposal` is due to the later sections for Preprocess Proposal and Vote Extensions, where there may be application dependent data in the header that must be verified before accepting the header.
If the returned `ResponseVerifyHeader.accept_header` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseVerifyHeader.evidence` is appended to the validators local `EvidencePool`.
Upon receiving an entire block proposal (in the current implementation, all "block parts"), every validator runs `ProcessProposal(block)`. If the returned `ResponseProcessProposal.accept_block` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseProcessProposal.evidence` is appended to the validators local `EvidencePool`.
Once a validator knows that consensus has failed to be achieved for a given block, it must run `RevertProposal(block.height, block.round)`, in order to signal to the application to revert any potentially mutative state changes it may have made. In Tendermint, this occurs when incrementing rounds.
**RFC**: How do we handle the scenario where honest node A finalized on round x, and honest node B finalized on round x + 1? (e.g. when 2f precommits are publicly known, and a validator precommits themself but doesn't broadcast, but they increment rounds) Is this a real concern? The state root derived could change if everyone finalizes on round x+1, not round x, as the state machine can depend non-uniformly on timestamp.
The application is expected to cache the block data for later execution.
The `isValidator` flag is set according to whether the current node is a validator or a full node. This is intended to allow for beginning validator-dependent computation that will be included later in vote extensions. (An example of this is threshold decryptions of ciphertexts.)
### DeliverTx rename to FinalizeBlock
After implementing `ProcessProposal`, txs no longer need to be delivered during the block execution phase. Instead, they are already in the state machine. Thus `BeginBlock, DeliverTx, EndBlock` can all be replaced with a single ABCI method for `ExecuteBlock`. Internally the application may still structure its method for executing the block as `BeginBlock, DeliverTx, EndBlock`. However, it is overly restrictive to enforce that the block be executed after it is finalized. There are multiple other, very reasonable pipelined execution models one can go for. So instead we suggest calling this succession of methods `FinalizeBlock`. We propose the following API
Replace the `BeginBlock, DeliverTx, EndBlock` ABCI methods with the following method
```rust
fn FinalizeBlock() -> ResponseFinalizeBlock
```
where `ResponseFinalizeBlock` has the following API, in terms of what already exists
```rust
struct ResponseFinalizeBlock {
updates: ResponseEndBlock,
tx_results: Vec<ResponseDeliverTx>
}
```
`ResponseEndBlock` should then be renamed to `ConsensusUpdates` and `ResponseDeliverTx` should be renamed to `ResponseTx`.
### Vote Extensions
The Vote Extensions phase allows applications to force their validators to do more than just validate within consensus. This is done by allowing the application to add more data to their votes, in the final round of voting. (Namely the precommit)
This additional application data will then appear in the block header.
First we discuss the API changes to the vote struct directly
```rust
fn ExtendVote(height: u64, round: u64) -> (UnsignedAppVoteData, SelfAuthenticatingAppData)
fn VerifyVoteExtension(signed_app_vote_data: Vec<u8>, self_authenticating_app_vote_data: Vec<u8>) -> bool
```
There are two types of data that the application can enforce validators to include with their vote.
There is data that the app needs the validator to sign over in their vote, and there can be self-authenticating vote data. Self-authenticating here means that the application, upon seeing these bytes, knows it's valid, came from the validator and is non-malleable. We give an example of each type of vote data here, to make their roles clearer.
- Unsigned app vote data: A use case of this is if you wanted validator backed oracles, where each validator independently signs some oracle data in their vote, and the median of these values is used on chain. Thus we leverage consensus' signing process for convenience, and use that same key to sign the oracle data.
- Self-authenticating vote data: A use case of this is in threshold random beacons. Every validator produces a threshold beacon share. This threshold beacon share can be verified by any node in the network, given the share and the validators public key (which is not the same as its consensus public key). However, this decryption share will not make it into the subsequent block's header. They will be aggregated by the subsequent block proposer to get a single random beacon value that will appear in the subsequent block's header. Everyone can then verify that this aggregated value came from the requisite threshold of the validator set, without increasing the bandwidth for full nodes or light clients. To achieve this goal, the self-authenticating vote data cannot be signed over by the consensus key along with the rest of the vote, as that would require all full nodes & light clients to know this data in order to verify the vote.
The `CanonicalVote` struct will accommodate the `UnsignedAppVoteData` field by adding another string to its encoding, after the `chain-id`. This should not interfere with existing hardware signing integrations, as it does not affect the constant offset for the `height` and `round`, and the vote size does not have an explicit upper bound. (So adding this unsigned app vote data field is equivalent from the HSM's perspective as having a superlong chain-ID)
**RFC**: Please comment if you think it will be fine to have elongate the message the HSM signs, or if we need to explore pre-hashing the app vote data.
The flow of these methods is that when a validator has to precommit, Tendermint will first produce a precommit canonical vote without the application vote data. It will then pass it to the application, which will return unsigned application vote data, and self authenticating application vote data. It will bundle the `unsigned_application_vote_data` into the canonical vote, and pass it to the HSM to sign. Finally it will package the self-authenticating app vote data, and the `signed_vote_data` together, into one final Vote struct to be passed around the network.
#### Changes to Prepare Proposal Phase
There are many use cases where the additional data from vote extensions can be batch optimized.
This is mainly of interest when the votes include self-authenticating app vote data that can be batched together, or the unsigned app vote data is the same across all votes.
To allow for this, we change the PrepareProposal API to the following
```rust
fn PrepareProposal(Block, UnbatchedHeader) -> (BlockData, Header)
```
where `UnbatchedHeader` essentially contains a "RawCommit", the `Header` contains a batch-optimized `commit` and an additional "Application Data" field in its root. This will involve a number of changes to core data structures, which will be gone over in the ADR.
The `Unbatched` header and `rawcommit` will never be broadcasted, they will be completely internal to consensus.
#### Inter-process communication (IPC) effects
For brevity in exposition above, we did not discuss the trade-offs that may occur in interprocess communication delays that these changes will introduce.
These new ABCI methods add more locations where the application must communicate with the consensus engine.
In most configurations, we expect that the consensus engine and the application will be either statically or dynamically linked, so all communication is a matter of at most adjusting the memory model the data is laid out within.
This memory model conversion is typically considered negligible, as delay here is measured on the order of microseconds at most, whereas we face millisecond delays due to cryptography and network overheads.
Thus we ignore the overhead in the case of linked libraries.
In the case where the consensus engine and the application are run in separate processes, and thus communicate with a form of Inter-process communication (IPC), the delays can easily become on the order of milliseconds based upon the data sent. Thus it's important to consider what's happening here.
We go through this phase by phase.
##### Prepare proposal IPC overhead
This requires a round of IPC communication, where both directions are quite large. Namely the proposer communicating an entire block to the application.
However, this can be mitigated by splitting up `PrepareProposal` into two distinct, async methods, one for the block IPC communication, and one for the Header IPC communication.
Then for chains where the block data does not depend on the header data, the block data IPC communication can proceed in parallel to the prior block's voting phase. (As a node can know whether or not it's the leader in the next round)
Furthermore, this IPC communication is expected to be quite low relative to the amount of p2p gossip time it takes to send the block data around the network, so this is perhaps a premature concern until more sophisticated block gossip protocols are implemented.
##### Process Proposal IPC overhead
This phase changes the amount of time available for the consensus engine to deliver a block's data to the state machine.
Before, the block data for block N would be delivered to the state machine upon receiving a commit for block N and then be executed.
The state machine would respond after executing the txs and before prevoting.
The time for block delivery from the consensus engine to the state machine after this change is the time from receiving block proposal N to the time of precommit on proposal N.
It is expected that this difference is unimportant in practice, as this time is in parallel to one round of p2p communication for prevoting, which is expected to be significantly less than the time for the consensus engine to deliver a block to the state machine.
##### Vote Extension IPC overhead
This has a small amount of data, but does incur an IPC round trip delay. This IPC round trip delay is pretty negligible as compared the variance in vote gossip time. (the IPC delay is typically on the order of 10 microseconds)
## Status
Proposed
## Consequences
### Positive
- Enables a large number of new features for applications
- Supports both immediate and delayed execution models
- Allows application specific data from each validator
- Allows for batch optimizations across txs, and votes
### Negative
- This is a breaking change to all existing ABCI clients, however the application should be able to have a thin wrapper to replicate existing ABCI behavior.
- PrepareProposal - can be a no-op
- Process Proposal - has to cache the block, but can otherwise be a no-op
- Vote Extensions - can be a no-op
- Finalize Block - Can black-box call BeginBlock, DeliverTx, EndBlock given the cached block data
- Vote Extensions adds more complexity to core Tendermint Data Structures
- Allowing alternate execution models will lead to a proliferation of new ways for applications to violate expected guarantees.
### Neutral
- IPC overhead considerations change, but mostly for the better
## References
Reference for IPC delay constants: <http://pages.cs.wisc.edu/~adityav/Evaluation_of_Inter_Process_Communication_Mechanisms.pdf>
### Short list of blocked features / scaling improvements with required ABCI++ Phases
| Feature | PrepareProposal | ProcessProposal | Vote Extensions |
| :--- | :---: | :---: | :---: |
| Tx based signature aggregation | X | | |
| SNARK proof of valid state transition | X | | |
| Validator provided authentication paths in stateless blockchains | X | | |
| Immediate Execution | | X | |
| Simple soft forks | | X | |
| Validator guaranteed IBC connection attempts | | | X |
| Validator based price oracles | | | X |
| Immediate Execution with increased time for block execution | X | X | X |
| Threshold Encrypted txs | X | X | X |

+ 162
- 0
docs/rfc/rfc-011-delete-gas.md View File

@ -0,0 +1,162 @@
# RFC 011: Remove Gas From Tendermint
## Changelog
- 03-Feb-2022: Initial draft (@williambanfield).
- 10-Feb-2022: Update in response to feedback (@williambanfield).
- 11-Feb-2022: Add reflection on MaxGas during consensus (@williambanfield).
## Abstract
In the v0.25.0 release, Tendermint added a mechanism for tracking 'Gas' in the mempool.
At a high level, Gas allows applications to specify how much it will cost the network,
often in compute resources, to execute a given transaction. While such a mechanism is common
in blockchain applications, it is not generalizable enough to be a maintained as a part
of Tendermint. This RFC explores the possibility of removing the concept of Gas from
Tendermint while still allowing applications the power to control the contents of
blocks to achieve similar goals.
## Background
The notion of Gas was included in the original Ethereum whitepaper and exists as
an important feature of the Ethereum blockchain.
The [whitepaper describes Gas][eth-whitepaper-messages] as an Anti-DoS mechanism. The Ethereum Virtual Machine
provides a Turing complete execution platform. Without any limitations, malicious
actors could waste computation resources by directing the EVM to perform large
or even infinite computations. Gas serves as a metering mechanism to prevent this.
Gas appears to have been added to Tendermint multiple times, initially as part of
a now defunct `/vm` package, and in its most recent iteration [as part of v0.25.0][gas-add-pr]
as a mechanism to limit the transactions that will be included in the block by an additional
parameter.
Gas has gained adoption within the Cosmos ecosystem [as part of the Cosmos SDK][cosmos-sdk-gas].
The SDK provides facilities for tracking how much 'Gas' a transaction is expected to take
and a mechanism for tracking how much gas a transaction has already taken.
Non-SDK applications also make use of the concept of Gas. Anoma appears to implement
[a gas system][anoma-gas] to meter the transactions it executes.
While the notion of gas is present in projects that make use of Tendermint, it is
not a concern of Tendermint's. Tendermint's value and goal is producing blocks
via a distributed consensus algorithm. Tendermint relies on the application specific
code to decide how to handle the transactions Tendermint has produced (or if the
application wants to consider them at all). Gas is an application concern.
Our implementation of Gas is not currently enforced by consensus. Our current validation check that
occurs during block propagation does not verify that the block is under the configured `MaxGas`.
Ensuring that the transactions in a proposed block do not exceed `MaxGas` would require
input from the application during propagation. The `ProcessProposal` method introduced
as part of ABCI++ would enable such input but would further entwine Tendermint and
the application. The issue of checking `MaxGas` during block propagation is important
because it demonstrates that the feature as it currently exists is not implemented
as fully as it perhaps should be.
Our implementation of Gas is causing issues for node operators and relayers. At
the moment, transactions that overflow the configured `MaxGas` can be silently rejected
from the mempool. Overflowing `MaxGas` is the _only_ way that a transaction can be considered
invalid that is not directly a result of failing the `CheckTx`. Operators, and the application,
do not know that a transaction was removed from the mempool for this reason. A stateless check
of this nature is exactly what `CheckTx` exists for and there is no reason for the mempool
to keep track of this data separately. A special [MempoolError][add-mempool-error] field
was added in v0.35 to communicate to clients that a transaction failed after `CheckTx`.
While this should alleviate the pain for operators wishing to understand if their
transaction was included in the mempool, it highlights that the abstraction of
what is included in the mempool is not currently well defined.
Removing Gas from Tendermint and the mempool would allow for the mempool to be a better
abstraction: any transaction that arrived at `CheckTx` and passed the check will either be
a candidate for a later block or evicted after a TTL is reached or to make room for
other, higher priority transactions. All other transactions are completely invalid and can be discarded forever.
Removing gas will not be completely straightforward. It will mean ensuring that
equivalent functionality can be implemented outside of the mempool using the mempool's API.
## Discussion
This section catalogs the functionality that will need to exist within the Tendermint
mempool to allow Gas to be removed and replaced by application-side bookkeeping.
### Requirement: Provide Mempool Tx Sorting Mechanism
Gas produces a market for inclusion in a block. On many networks, a [gas fee][cosmos-sdk-fees] is
included in pending transactions. This fee indicates how much a user is willing to
pay per unit of execution and the fees are distributed to validators.
Validators wishing to extract higher gas fees are incentivized to include transactions
with the highest listed gas fees into each block. This produces a natural ordering
of the pending transactions. Applications wishing to implement a gas mechanism need
to be able to order the transactions in the mempool. This can trivially be accomplished
by sorting transactions using the `priority` field available to applications as part of
v0.35's `ResponseCheckTx` message.
### Requirement: Allow Application-Defined Block Resizing
When creating a block proposal, Tendermint pulls a set of possible transactions out of
the mempool to include in the next block. Tendermint uses `MaxGas` to limit the set of transactions
it pulls out of the mempool, fetching a set of transactions whose combined gas is less than `MaxGas`.
By removing gas tracking from Tendermint's mempool, Tendermint will need to provide a way for
applications to determine an acceptable set of transactions to include in the block.
This is what the new ABCI++ `PrepareProposal` method is useful for. Applications
that wish to limit the contents of a block by an application-defined limit may
do so by removing transactions from the proposal it is passed during `PrepareProposal`.
Applications wishing to reach parity with the current Gas implementation may do
so by creating an application-side limit: filtering out transactions from
`PrepareProposal` that cause the proposal to exceed the maximum gas. Additionally,
applications can currently opt to have all transactions in the mempool delivered
during `PrepareProposal` by passing `-1` for `MaxGas` and `MaxBytes` into
[ReapMaxBytesMaxGas][reap-max-bytes-max-gas].
### Requirement: Handle Transaction Metadata
Moving the gas mechanism into applications adds an additional piece of complexity
to applications. The application must now track how much gas it expects a transaction
to consume. The mempool currently handles this bookkeeping responsibility and uses the estimated
gas to determine the set of transactions to include in the block. In order to task
the application with keeping track of this metadata, we should make it easier for the
application to do so. In general, we'll want to keep only one copy of this type
of metadata in the program at a time, either in the application or in Tendermint.
The following sections are possible solutions to the problem of storing transaction
metadata without duplication.
#### Metadata Handling: EvictTx Callback
A possible approach to handling transaction metadata is by adding a new `EvictTx`
ABCI method. Whenever the mempool is removing a transaction, either because it has
reached its TTL or because it failed `RecheckTx`, `EvictTx` would be called with
the transaction hash. This would indicate to the application that it could free any
metadata it was storing about the transaction such as the computed gas fee.
Eviction callbacks are pretty common in caching systems, so this would be very
well-worn territory.
#### Metadata Handling: Application-Specific Metadata Field(s)
An alternative approach to handling transaction metadata would be the
addition of a new application-metadata field in the `ResponseCheckTx`. This field
would be a protocol buffer message whose contents were entirely opaque to Tendermint.
The application would be responsible for marshalling and unmarshalling whatever data
it stored in this field. During `PrepareProposal`, the application would be passed
this metadata along with the transaction, allowing the application to use it to perform
any necessary filtering.
If either of these proposed metadata handling techniques are selected, it's likely
useful to enable applications to gossip metadata along with the transaction it is
gossiping. This could easily take the form of an opaque proto message that is
gossiped along with the transaction.
## References
[eth-whitepaper-messages]: https://ethereum.org/en/whitepaper/#messages-and-transactions
[gas-add-pr]: https://github.com/tendermint/tendermint/pull/2360
[cosmos-sdk-gas]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/gas-fees.md
[cosmos-sdk-fees]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/tx-lifecycle.md#gas-and-fees
[anoma-gas]: https://github.com/anoma/anoma/blob/6974fe1532a59db3574fc02e7f7e65d1216c1eb2/docs/src/specs/ledger.md#transaction-execution
[cosmos-sdk-fee]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/types/tx/tx.pb.go#L780-L794
[issue-7750]: https://github.com/tendermint/tendermint/issues/7750
[reap-max-bytes-max-gas]: https://github.com/tendermint/tendermint/blob/1ac58469f32a98f1c0e2905ca1773d9eac7b7103/internal/mempool/types.go#L45
[add-mempool-error]: https://github.com/tendermint/tendermint/blob/205bfca66f6da1b2dded381efb9ad3792f9404cf/rpc/coretypes/responses.go#L239

+ 98
- 0
docs/rfc/rfc-012-semantic-versioning.md View File

@ -0,0 +1,98 @@
# RFC 012: Semantic Versioning
## Changelog
- 2021-11-19: Initial Draft
- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 006](https://github.com/tendermint/spec/pull/365))
## Author(s)
- Callum Waters @cmwaters
## Context
We use versioning as an instrument to hold a set of promises to users and signal when such a set changes and how. In the conventional sense of a Go library, major versions signal that the public Go API’s have changed in a breaking way and thus require the users of such libraries to change their usage accordingly. Tendermint is a bit different in that there are multiple users: application developers (both in-process and out-of-process), node operators, and external clients. More importantly, both how these users interact with Tendermint and what's important to these users differs from how users interact and what they find important in a more conventional library.
This document attempts to encapsulate the discussions around versioning in Tendermint and draws upon them to propose a guide to how Tendermint uses versioning to make promises to its users.
For a versioning policy to make sense, we must also address the intended frequency of breaking changes. The strictest guarantees in the world will not help users if we plan to break them with every release.
Finally I would like to remark that this RFC only addresses the "what", as in what are the rules for versioning. The "how" of Tendermint implementing the versioning rules we choose, will be addressed in a later RFC on Soft Upgrades.
## Discussion
We first begin with a round up of the various users and a set of assumptions on what these users expect from Tendermint in regards to versioning:
1. **Application Developers**, those that use the ABCI to build applications on top of Tendermint, are chiefly concerned with that API. Breaking changes will force developers to modify large portions of their codebase to accommodate for the changes. Some ABCI changes such as introducing priority for the mempool don't require any effort and can be lazily adopted whilst changes like ABCI++ may force applications to redesign their entire execution system. It's also worth considering that the API's for go developers differ to developers of other languages. The former here can use the entire Tendermint library, most notably the local RPC methods, and so the team must be wary of all public Go API's.
2. **Node Operators**, those running node infrastructure, are predominantly concerned with downtime, complexity and frequency of upgrading, and avoiding data loss. They may be also concerned about changes that may break the scripts and tooling they use to supervise their nodes.
3. **External Clients** are those that perform any of the following:
- consume the RPC endpoints of nodes like `/block`
- subscribe to the event stream
- make queries to the indexer
This set are concerned with chain upgrades which will impact their ability to query state and block data as well as broadcast transactions. Examples include wallets and block explorers.
4. **IBC module and relayers**. The developers of IBC and consumers of their software are concerned about changes that may affect a chain's ability to send arbitrary messages to another chain. Specifically, these users are affected by any breaking changes to the light client verification algorithm.
Although we present them here as having different concerns, in a broader sense these user groups share a concern for the end users of applications. A crucial principle guiding this RFC is that **the ability for chains to provide continual service is more important than the actual upgrade burden put on the developers of these chains**. This means some extra burden for application developers is tolerable if it minimizes or substantially reduces downtime for the end user.
### Modes of Interprocess Communication
Tendermint has two primary mechanisms to communicate with other processes: RPC and P2P. The division marks the boundary between the internal and external components of the network:
- The P2P layer is used in all cases that nodes (of any type) need to communicate with one another.
- The RPC interface is for any outside process that wants to communicate with a node.
The design principle here is that **communication via RPC is to a trusted source** and thus the RPC service prioritizes inspection rather than verification. The P2P interface is the primary medium for verification.
As an example, an in-browser light client would verify headers (and perhaps application state) via the p2p layer, and then pass along information on to the client via RPC (or potentially directly via a separate API).
The main exceptions to this are the IBC module and relayers, which are external to the node but also require verifiable data. Breaking changes to the light client verification path mean that all neighbouring chains that are connected will no longer be able to verify state transitions and thus pass messages back and forward.
## Proposal
Tendermint version labels will follow the syntax of [Semantic Versions 2.0.0](https://semver.org/) with a major, minor and patch version. The version components will be interpreted according to these rules:
For the entire cycle of a **major version** in Tendermint:
- All blocks and state data in a blockchain can be queried. All headers can be verified even across minor version changes. Nodes can both block sync and state sync from genesis to the head of the chain.
- Nodes in a network are able to communicate and perform BFT state machine replication so long as the agreed network version is the lowest of all nodes in a network. For example, nodes using version 1.5.x and 1.2.x can operate together so long as the network version is 1.2 or lower (but still within the 1.x range). This rule essentially captures the concept of network backwards compatibility.
- Node RPC endpoints will remain compatible with existing external clients:
- New endpoints may be added, but old endpoints may not be removed.
- Old endpoints may be extended to add new request and response fields, but requests not using those fields must function as before the change.
- Migrations should be automatic. Upgrading of one node can happen asynchronously with respect to other nodes (although agreement of a network-wide upgrade must still occur synchronously via consensus).
For the entire cycle of a **minor version** in Tendermint:
- Public Go API's, for example in `node` or `abci` packages will not change in a way that requires any consumer (not just application developers) to modify their code.
- No breaking changes to the block protocol. This means that all block related data structures should not change in a way that breaks any of the hashes, the consensus engine or light client verification.
- Upgrades between minor versions may not result in any downtime (i.e., no migrations are required), nor require any changes to the config files to continue with the existing behavior. A minor version upgrade will require only stopping the existing process, swapping the binary, and starting the new process.
A new **patch version** of Tendermint will only contain bug fixes and updates that impact the security and stability of Tendermint.
These guarantees will come into effect at release 1.0.
## Status
Proposed
## Consequences
### Positive
- Clearer communication of what versioning means to us and the effect it has on our users.
### Negative
- Can potentially incur greater engineering effort to uphold and follow these guarantees.
### Neutral
## References
- [SemVer](https://semver.org/)
- [Tendermint Tracking Issue](https://github.com/tendermint/tendermint/issues/5680)

rfc/004-abci++.md → docs/rfc/rfc-013-abci++.md View File


rfc/006-semantic-versioning.md → docs/rfc/rfc-014-semantic-versioning.md View File


+ 2
- 2
docs/roadmap/roadmap.md View File

@ -8,7 +8,7 @@ order: 1
This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams.
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/tendermint/issues/new/choose). Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status.
@ -43,7 +43,7 @@ Added a new `EventSink` interface to allow alternatives to Tendermint's propriet
### ABCI++
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, and complete delivery of blocks after agreement (to allow for concurrent execution). It enables both immediate and delayed agreement. [More](https://github.com/tendermint/spec/blob/master/spec/abci++/README.md)
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, and complete delivery of blocks after agreement (to allow for concurrent execution). It enables both immediate and delayed agreement. [More](https://github.com/tendermint/tendermint/blob/master/spec/abci++/README.md)
### Proposer-Based Timestamps


+ 1
- 1
docs/tendermint-core/block-structure.md View File

@ -11,6 +11,6 @@ nodes. This blockchain is accessible via various RPC endpoints, mainly
`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what
exactly is stored in these blocks?
The [specification](https://github.com/tendermint/spec/blob/8dd2ed4c6fe12459edeb9b783bdaaaeb590ec15c/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.
The [specification](https://github.com/tendermint/tendermint/tree/master/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.
To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types).

+ 1
- 1
docs/tendermint-core/consensus/README.md View File

@ -23,7 +23,7 @@ explained in a forthcoming document.
For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the
block as the block size is big, i.e., they don't embed the block inside `Proposal` and
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in
[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
[Blockchain](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#blockid) section)
that uniquely identifies each block. The block itself is
disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a
proposer first splitting a block into a number of block parts, that are then gossiped between


+ 1
- 1
docs/tendermint-core/subscription.md View File

@ -43,7 +43,7 @@ transactions](../app-dev/indexing-transactions.md) for details.
When validator set changes, ValidatorSetUpdates event is published. The
event carries a list of pubkey/power pairs. The list is the same
Tendermint receives from ABCI application (see [EndBlock
section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in
section](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#endblock) in
the ABCI spec).
Response:


+ 1
- 2
go.mod View File

@ -14,7 +14,7 @@ require (
github.com/golangci/golangci-lint v1.44.0
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/gorilla/websocket v1.5.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.4
@ -23,7 +23,6 @@ require (
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/prometheus/client_golang v1.12.1
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa


+ 2
- 4
go.sum View File

@ -477,8 +477,8 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
@ -865,8 +865,6 @@ github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=


+ 6
- 8
internal/blocksync/pool_test.go View File

@ -125,7 +125,6 @@ func TestBlockPoolBasic(t *testing.T) {
case err := <-errorsCh:
t.Error(err)
case request := <-requestsCh:
t.Logf("Pulled new BlockRequest %v", request)
if request.Height == 300 {
return // Done!
}
@ -139,21 +138,19 @@ func TestBlockPoolTimeout(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
start := int64(42)
peers := makePeers(10, start+1, 1000)
errorsCh := make(chan peerError, 1000)
requestsCh := make(chan BlockRequest, 1000)
pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh)
pool := NewBlockPool(logger, start, requestsCh, errorsCh)
err := pool.Start(ctx)
if err != nil {
t.Error(err)
}
t.Cleanup(func() { cancel(); pool.Wait() })
for _, peer := range peers {
t.Logf("Peer %v", peer.id)
}
// Introduce each peer.
go func() {
for _, peer := range peers {
@ -182,7 +179,6 @@ func TestBlockPoolTimeout(t *testing.T) {
for {
select {
case err := <-errorsCh:
t.Log(err)
// consider error to be always timeout here
if _, ok := timedOut[err.peerID]; !ok {
counter++
@ -191,7 +187,9 @@ func TestBlockPoolTimeout(t *testing.T) {
}
}
case request := <-requestsCh:
t.Logf("Pulled new BlockRequest %+v", request)
logger.Debug("received request",
"counter", counter,
"request", request)
}
}
}


+ 4
- 4
internal/blocksync/reactor_test.go View File

@ -203,7 +203,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
@ -243,7 +243,7 @@ func TestReactor_SyncTime(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
@ -271,7 +271,7 @@ func TestReactor_NoBlockResponse(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
@ -323,7 +323,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)


+ 0
- 3
internal/consensus/README.md View File

@ -1,3 +0,0 @@
# Consensus
See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus).

+ 4
- 2
internal/consensus/byzantine_test.go View File

@ -60,7 +60,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
defer os.RemoveAll(thisConfig.RootDir)
@ -180,6 +180,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
require.NotNil(t, lazyNodeState.privValidator)
var commit *types.Commit
var votes []*types.Vote
switch {
case lazyNodeState.Height == lazyNodeState.state.InitialHeight:
// We're creating a proposal for the first block.
@ -188,6 +189,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
case lazyNodeState.LastCommit.HasTwoThirdsMajority():
// Make the commit from LastCommit
commit = lazyNodeState.LastCommit.MakeCommit()
votes = lazyNodeState.LastCommit.GetVotes()
default: // This shouldn't happen.
lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
return
@ -205,7 +207,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
proposerAddr := lazyNodeState.privValidatorPubKey.Address()
block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock(
ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr,
ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, votes,
)
require.NoError(t, err)


+ 13
- 14
internal/consensus/common_test.go View File

@ -50,23 +50,23 @@ type cleanupFunc func()
func configSetup(t *testing.T) *config.Config {
t.Helper()
cfg, err := ResetConfig("consensus_reactor_test")
cfg, err := ResetConfig(t.TempDir(), "consensus_reactor_test")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
consensusReplayConfig, err := ResetConfig("consensus_replay_test")
consensusReplayConfig, err := ResetConfig(t.TempDir(), "consensus_replay_test")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) })
configStateTest, err := ResetConfig("consensus_state_test")
configStateTest, err := ResetConfig(t.TempDir(), "consensus_state_test")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) })
configMempoolTest, err := ResetConfig("consensus_mempool_test")
configMempoolTest, err := ResetConfig(t.TempDir(), "consensus_mempool_test")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) })
configByzantineTest, err := ResetConfig("consensus_byzantine_test")
configByzantineTest, err := ResetConfig(t.TempDir(), "consensus_byzantine_test")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) })
@ -78,8 +78,8 @@ func ensureDir(t *testing.T, dir string, mode os.FileMode) {
require.NoError(t, tmos.EnsureDir(dir, mode))
}
func ResetConfig(name string) (*config.Config, error) {
return config.ResetTestRoot(name)
func ResetConfig(dir, name string) (*config.Config, error) {
return config.ResetTestRoot(dir, name)
}
//-------------------------------------------------------------------------------
@ -422,7 +422,7 @@ func newState(
) *State {
t.Helper()
cfg, err := config.ResetTestRoot("consensus_state_test")
cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_state_test")
require.NoError(t, err)
return newStateWithConfig(ctx, t, logger, cfg, state, pv, app)
@ -769,7 +769,7 @@ func makeConsensusState(
blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@ -827,7 +827,7 @@ func randConsensusNetWithPeers(
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
state, _ := sm.MakeGenesisState(genDoc)
thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@ -839,10 +839,10 @@ func randConsensusNetWithPeers(
if i < nValidators {
privVal = privVals[i]
} else {
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_")
require.NoError(t, err)
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_")
require.NoError(t, err)
privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "")
@ -946,8 +946,7 @@ func (*mockTicker) SetLogger(log.Logger) {}
func newPersistentKVStore(t *testing.T, logger log.Logger) abci.Application {
t.Helper()
dir, err := os.MkdirTemp("", "persistent-kvstore")
require.NoError(t, err)
dir := t.TempDir()
return kvstore.NewPersistentKVStoreApplication(logger, dir)
}


+ 1
- 0
internal/consensus/invalid_test.go View File

@ -7,6 +7,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bytes"


+ 19
- 13
internal/consensus/mempool_test.go View File

@ -35,7 +35,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
baseConfig := configSetup(t)
config, err := ResetConfig("consensus_mempool_txs_available_test")
config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
@ -62,7 +62,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
config, err := ResetConfig("consensus_mempool_txs_available_test")
config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
@ -87,7 +87,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
config, err := ResetConfig("consensus_mempool_txs_available_test")
config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@ -264,15 +264,21 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.txCount) {
return abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
txValue := txAsUint64(tx)
if txValue != uint64(app.txCount) {
respTxs[i] = &abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue),
}
continue
}
app.txCount++
respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}
app.txCount++
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
return abci.ResponseFinalizeBlock{Txs: respTxs}
}
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {


+ 1
- 0
internal/consensus/metrics.go View File

@ -3,6 +3,7 @@ package consensus
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/discard"
"github.com/tendermint/tendermint/types"
prometheus "github.com/go-kit/kit/metrics/prometheus"


+ 1
- 0
internal/consensus/mocks/cons_sync_reactor.go View File

@ -4,6 +4,7 @@ package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/internal/state"
)


+ 1
- 0
internal/consensus/mocks/fast_sync_reactor.go View File

@ -4,6 +4,7 @@ package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/internal/state"
time "time"


+ 1
- 1
internal/consensus/reactor_test.go View File

@ -391,7 +391,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
defer os.RemoveAll(thisConfig.RootDir)


+ 3
- 8
internal/consensus/replay_stubs.go View File

@ -87,20 +87,15 @@ type mockProxyApp struct {
abciResponses *tmstate.ABCIResponses
}
func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTxs[mock.txCount]
func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
r := mock.abciResponses.FinalizeBlock
mock.txCount++
if r == nil {
return abci.ResponseDeliverTx{}
return abci.ResponseFinalizeBlock{}
}
return *r
}
func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
mock.txCount = 0
return *mock.abciResponses.EndBlock
}
func (mock *mockProxyApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{Data: mock.appHash}
}

+ 24
- 21
internal/consensus/replay_test.go View File

@ -142,7 +142,7 @@ func TestWALCrash(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
consensusReplayConfig, err := ResetConfig(tc.name)
consensusReplayConfig, err := ResetConfig(t.TempDir(), tc.name)
require.NoError(t, err)
crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop)
})
@ -665,12 +665,13 @@ func TestMockProxyApp(t *testing.T) {
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
txIndex := 0
txCount := 0
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})
abciResWithEmptyDeliverTx.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
@ -685,31 +686,33 @@ func TestMockProxyApp(t *testing.T) {
require.NoError(t, err)
abciRes := new(tmstate.ABCIResponses)
abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
abciRes.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
someTx := []byte("tx")
resp, err := mock.DeliverTx(ctx, abci.RequestDeliverTx{Tx: someTx})
resp, err := mock.FinalizeBlock(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
require.NoError(t, err)
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
if resp.Code == abci.CodeTypeOK {
validTxs++
} else {
invalidTxs++
for _, tx := range resp.Txs {
if tx.Code == abci.CodeTypeOK {
validTxs++
} else {
invalidTxs++
}
txCount++
}
abciRes.DeliverTxs[txIndex] = resp
txIndex++
assert.NoError(t, err)
})
assert.True(t, validTxs == 1)
assert.True(t, invalidTxs == 0)
require.Equal(t, 1, txCount)
require.Equal(t, 1, validTxs)
require.Zero(t, invalidTxs)
}
func tempWALWithData(t *testing.T, data []byte) string {
t.Helper()
walFile, err := os.CreateTemp("", "wal")
walFile, err := os.CreateTemp(t.TempDir(), "wal")
require.NoError(t, err, "failed to create temp WAL file")
_, err = walFile.Write(data)
@ -743,7 +746,7 @@ func testHandshakeReplay(
logger := log.TestingLogger()
if testValidatorsChange {
testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_m", t.Name(), mode))
require.NoError(t, err)
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
stateDB = dbm.NewMemDB()
@ -754,7 +757,7 @@ func testHandshakeReplay(
commits = sim.Commits
store = newMockBlockStore(t, cfg, genesisState.ConsensusParams)
} else { // test single node
testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode))
require.NoError(t, err)
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks)
@ -1004,7 +1007,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, err := ResetConfig("handshake_test_")
cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
@ -1288,7 +1291,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)
cfg, err := ResetConfig("handshake_test_")
cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) })


+ 3
- 1
internal/consensus/state.go View File

@ -1334,6 +1334,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b
}
var commit *types.Commit
var votes []*types.Vote
switch {
case cs.Height == cs.state.InitialHeight:
// We're creating a proposal for the first block.
@ -1343,6 +1344,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b
case cs.LastCommit.HasTwoThirdsMajority():
// Make the commit from LastCommit
commit = cs.LastCommit.MakeCommit()
votes = cs.LastCommit.GetVotes()
default: // This shouldn't happen.
cs.logger.Error("propose step; cannot propose anything without commit for the previous block")
@ -1358,7 +1360,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (block *types.Block, b
proposerAddr := cs.privValidatorPubKey.Address()
return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr)
return cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, votes)
}
// Enter: `timeoutPropose` after entering Propose.


+ 10
- 18
internal/consensus/types/height_vote_set_test.go View File

@ -2,11 +2,10 @@ package types
import (
"context"
"log"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test/factory"
@ -16,40 +15,33 @@ import (
"github.com/tendermint/tendermint/types"
)
var cfg *config.Config // NOTE: must be reset for each _test.go file
func TestMain(m *testing.M) {
var err error
cfg, err = config.ResetTestRoot("consensus_height_vote_set_test")
func TestPeerCatchupRounds(t *testing.T) {
cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_height_vote_set_test")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
code := m.Run()
os.RemoveAll(cfg.RootDir)
os.Exit(code)
}
func TestPeerCatchupRounds(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1)
hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet)
chainID := cfg.ChainID()
hvs := NewHeightVoteSet(chainID, 1, valSet)
vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals)
vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals, chainID)
added, err := hvs.AddVote(vote999_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals)
vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals, chainID)
added, err = hvs.AddVote(vote1000_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals)
vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals, chainID)
added, err = hvs.AddVote(vote1001_0, "peer1")
if err != ErrGotVoteFromUnwantedRound {
t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
@ -71,6 +63,7 @@ func makeVoteHR(
height int64,
valIndex, round int32,
privVals []types.PrivValidator,
chainID string,
) *types.Vote {
t.Helper()
@ -89,7 +82,6 @@ func makeVoteHR(
Type: tmproto.PrecommitType,
BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}},
}
chainID := cfg.ChainID()
v := vote.ToProto()
err = privVal.SignVote(ctx, chainID, v)


+ 1
- 0
internal/consensus/types/peer_round_state_test.go View File

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/bits"
)


+ 1
- 1
internal/consensus/wal_generator.go View File

@ -145,7 +145,7 @@ func makeAddrs() (p2pAddr, rpcAddr string) {
// getConfig returns a config for test cases
func getConfig(t *testing.T) *config.Config {
c, err := config.ResetTestRoot(t.Name())
c, err := config.ResetTestRoot(t.TempDir(), t.Name())
require.NoError(t, err)
p2pAddr, rpcAddr := makeAddrs()


+ 2
- 2
internal/eventbus/event_bus.go View File

@ -89,7 +89,7 @@ func (b *EventBus) Publish(ctx context.Context, eventValue string, eventData typ
}
func (b *EventBus) PublishEventNewBlock(ctx context.Context, data types.EventDataNewBlock) error {
events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...)
events := data.ResultFinalizeBlock.Events
// add Tendermint-reserved new block event
events = append(events, types.EventNewBlock)
@ -100,7 +100,7 @@ func (b *EventBus) PublishEventNewBlock(ctx context.Context, data types.EventDat
func (b *EventBus) PublishEventNewBlockHeader(ctx context.Context, data types.EventDataNewBlockHeader) error {
// no explicit deadline for publishing events
events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...)
events := data.ResultFinalizeBlock.Events
// add Tendermint-reserved new block header event
events = append(events, types.EventNewBlockHeader)


+ 17
- 25
internal/eventbus/event_bus_test.go View File

@ -83,14 +83,12 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
bps, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
resultBeginBlock := abci.ResponseBeginBlock{
resultFinalizeBlock := abci.ResponseFinalizeBlock{
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
},
}
resultEndBlock := abci.ResponseEndBlock{
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}},
{Type: "testType", Attributes: []abci.EventAttribute{
{Key: "baz", Value: "1"},
{Key: "foz", Value: "2"},
}},
},
}
@ -111,15 +109,13 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
edt := msg.Data().(types.EventDataNewBlock)
assert.Equal(t, block, edt.Block)
assert.Equal(t, blockID, edt.BlockID)
assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock)
assert.Equal(t, resultEndBlock, edt.ResultEndBlock)
assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock)
}()
err = eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{
Block: block,
BlockID: blockID,
ResultBeginBlock: resultBeginBlock,
ResultEndBlock: resultEndBlock,
Block: block,
BlockID: blockID,
ResultFinalizeBlock: resultFinalizeBlock,
})
assert.NoError(t, err)
@ -256,14 +252,12 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
require.NoError(t, err)
block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{})
resultBeginBlock := abci.ResponseBeginBlock{
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
},
}
resultEndBlock := abci.ResponseEndBlock{
resultFinalizeBlock := abci.ResponseFinalizeBlock{
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}},
{Type: "testType", Attributes: []abci.EventAttribute{
{Key: "baz", Value: "1"},
{Key: "foz", Value: "2"},
}},
},
}
@ -283,14 +277,12 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
edt := msg.Data().(types.EventDataNewBlockHeader)
assert.Equal(t, block.Header, edt.Header)
assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock)
assert.Equal(t, resultEndBlock, edt.ResultEndBlock)
assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock)
}()
err = eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{
Header: block.Header,
ResultBeginBlock: resultBeginBlock,
ResultEndBlock: resultEndBlock,
Header: block.Header,
ResultFinalizeBlock: resultFinalizeBlock,
})
assert.NoError(t, err)


+ 1
- 1
internal/evidence/doc.go View File

@ -1,7 +1,7 @@
/*
Package evidence handles all evidence storage and gossiping from detection to block proposal.
For the different types of evidence refer to the `evidence.go` file in the types package
or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.
or https://github.com/tendermint/tendermint/blob/master/spec/consensus/light-client/accountability.md.
Gossiping


+ 1
- 0
internal/evidence/mocks/block_store.go View File

@ -4,6 +4,7 @@ package mocks
import (
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/types"
)


+ 8
- 7
internal/inspect/inspect_test.go View File

@ -14,6 +14,7 @@ import (
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/inspect"
@ -28,7 +29,7 @@ import (
)
func TestInspectConstructor(t *testing.T) {
cfg, err := config.ResetTestRoot("test")
cfg, err := config.ResetTestRoot(t.TempDir(), "test")
require.NoError(t, err)
testLogger := log.TestingLogger()
t.Cleanup(leaktest.Check(t))
@ -43,7 +44,7 @@ func TestInspectConstructor(t *testing.T) {
}
func TestInspectRun(t *testing.T) {
cfg, err := config.ResetTestRoot("test")
cfg, err := config.ResetTestRoot(t.TempDir(), "test")
require.NoError(t, err)
testLogger := log.TestingLogger()
@ -263,13 +264,13 @@ func TestBlockResults(t *testing.T) {
stateStoreMock := &statemocks.Store{}
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{
{
GasUsed: testGasUsed,
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{
{
GasUsed: testGasUsed,
},
},
},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}, nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Base").Return(int64(0))


+ 3
- 9
internal/libs/autofile/autofile_test.go View File

@ -25,11 +25,7 @@ func TestSIGHUP(t *testing.T) {
})
// First, create a temporary directory and move into it
dir, err := os.MkdirTemp("", "sighup_test")
require.NoError(t, err)
t.Cleanup(func() {
_ = os.RemoveAll(dir)
})
dir := t.TempDir()
require.NoError(t, os.Chdir(dir))
// Create an AutoFile in the temporary directory
@ -48,9 +44,7 @@ func TestSIGHUP(t *testing.T) {
require.NoError(t, os.Rename(name, name+"_old"))
// Move into a different temporary directory
otherDir, err := os.MkdirTemp("", "sighup_test_other")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(otherDir) })
otherDir := t.TempDir()
require.NoError(t, os.Chdir(otherDir))
// Send SIGHUP to self.
@ -112,7 +106,7 @@ func TestAutoFileSize(t *testing.T) {
defer cancel()
// First, create an AutoFile writing to a tempfile dir
f, err := os.CreateTemp("", "sighup_test")
f, err := os.CreateTemp(t.TempDir(), "sighup_test")
require.NoError(t, err)
require.NoError(t, f.Close())


+ 2
- 5
internal/libs/autofile/group_test.go View File

@ -132,11 +132,8 @@ func TestRotateFile(t *testing.T) {
}
}()
dir, err := os.MkdirTemp("", "rotate_test")
require.NoError(t, err)
defer os.RemoveAll(dir)
err = os.Chdir(dir)
require.NoError(t, err)
dir := t.TempDir()
require.NoError(t, os.Chdir(dir))
require.True(t, filepath.IsAbs(g.Head.Path))
require.True(t, filepath.IsAbs(g.Dir))


+ 0
- 31
internal/libs/sync/closer.go View File

@ -1,31 +0,0 @@
package sync
import "sync"
// Closer implements a primitive to close a channel that signals process
// termination while allowing a caller to call Close multiple times safely. It
// should be used in cases where guarantees cannot be made about when and how
// many times closure is executed.
type Closer struct {
closeOnce sync.Once
doneCh chan struct{}
}
// NewCloser returns a reference to a new Closer.
func NewCloser() *Closer {
return &Closer{doneCh: make(chan struct{})}
}
// Done returns the internal done channel allowing the caller either block or wait
// for the Closer to be terminated/closed.
func (c *Closer) Done() <-chan struct{} {
return c.doneCh
}
// Close gracefully closes the Closer. A caller should only call Close once, but
// it is safe to call it successive times.
func (c *Closer) Close() {
c.closeOnce.Do(func() {
close(c.doneCh)
})
}

+ 0
- 28
internal/libs/sync/closer_test.go View File

@ -1,28 +0,0 @@
package sync_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
)
func TestCloser(t *testing.T) {
closer := tmsync.NewCloser()
var timeout bool
select {
case <-closer.Done():
case <-time.After(time.Second):
timeout = true
}
for i := 0; i < 10; i++ {
closer.Close()
}
require.True(t, timeout)
<-closer.Done()
}

+ 1
- 1
internal/libs/tempfile/tempfile_test.go View File

@ -21,7 +21,7 @@ func TestWriteFileAtomic(t *testing.T) {
perm os.FileMode = 0600
)
f, err := os.CreateTemp("/tmp", "write-atomic-test-")
f, err := os.CreateTemp(t.TempDir(), "write-atomic-test-")
if err != nil {
t.Fatal(err)
}


+ 1
- 0
internal/mempool/ids_test.go View File

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/types"
)


+ 9
- 2
internal/mempool/mempool_bench_test.go View File

@ -14,8 +14,13 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// setup the cache and the mempool number for hitting GetEvictableTxs during the
// benchmark. 5000 is the current default mempool size in the TM config.
txmp := setup(ctx, b, 10000)
txmp.config.Size = 5000
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
const peerID = 1
b.ResetTimer()
@ -26,9 +31,11 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) {
require.NoError(b, err)
priority := int64(rng.Intn(9999-1000) + 1000)
tx := []byte(fmt.Sprintf("%X=%d", prefix, priority))
tx := []byte(fmt.Sprintf("sender-%d-%d=%X=%d", n, peerID, prefix, priority))
txInfo := TxInfo{SenderID: uint16(peerID)}
b.StartTimer()
require.NoError(b, txmp.CheckTx(ctx, tx, nil, TxInfo{}))
require.NoError(b, txmp.CheckTx(ctx, tx, nil, txInfo))
}
}

+ 1
- 1
internal/mempool/mempool_test.go View File

@ -82,7 +82,7 @@ func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoo
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
cfg.Mempool.CacheSize = cacheSize
appConnMem, err := cc(logger)


+ 2
- 1
internal/mempool/reactor_test.go View File

@ -12,6 +12,7 @@ import (
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
@ -41,7 +42,7 @@ type reactorTestSuite struct {
func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
t.Helper()
cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })


+ 1
- 0
internal/mempool/tx_test.go View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/types"
)


+ 1
- 0
internal/p2p/channel.go View File

@ -6,6 +6,7 @@ import (
"sync"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/types"
)


+ 1
- 0
internal/p2p/metrics_test.go View File

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/proto/tendermint/p2p"
)


+ 1
- 0
internal/p2p/p2ptest/util.go View File

@ -2,6 +2,7 @@ package p2ptest
import (
gogotypes "github.com/gogo/protobuf/types"
"github.com/tendermint/tendermint/types"
)


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save