Merge branch 'master' into rateControl-refactor

pull/7828/head
Sam Kleinman, 3 years ago (committed by GitHub)
commit 119e694ad1
146 changed files with 4607 additions and 3811 deletions
1. +3 -3 .github/workflows/build.yml
2. +2 -2 .github/workflows/docker.yml
3. +1 -1 .github/workflows/e2e-manual.yml
4. +1 -1 .github/workflows/e2e-nightly-34x.yml
5. +1 -1 .github/workflows/e2e-nightly-35x.yml
6. +1 -1 .github/workflows/e2e-nightly-master.yml
7. +1 -1 .github/workflows/e2e.yml
8. +1 -1 .github/workflows/fuzz-nightly.yml
9. +1 -1 .github/workflows/jepsen.yml
10. +1 -1 .github/workflows/linkchecker.yml
11. +6 -3 .github/workflows/lint.yml
12. +1 -1 .github/workflows/linter.yml
13. +1 -1 .github/workflows/markdown-links.yml
14. +2 -2 .github/workflows/proto-check.yml
15. +2 -2 .github/workflows/proto-dockerfile.yml
16. +1 -1 .github/workflows/release.yml
17. +2 -2 .github/workflows/tests.yml
18. +21 -0 CHANGELOG.md
19. +1 -0 CHANGELOG_PENDING.md
20. +8 -3 Makefile
21. +0 -33 abci/client/creators.go
22. +0 -85 abci/client/socket_client_test.go
23. +20 -17 abci/cmd/abci-cli/abci-cli.go
24. +4 -4 abci/example/example_test.go
25. +14 -14 abci/example/kvstore/kvstore.go
26. +9 -10 abci/example/kvstore/kvstore_test.go
27. +1 -1 abci/tests/server/client.go
28. +2 -2 abci/tests/test_cli/ex1.abci
29. +5 -3 abci/tests/test_cli/ex1.abci.out
30. +3 -3 abci/tests/test_cli/ex2.abci
31. +5 -5 abci/tests/test_cli/ex2.abci.out
32. +3 -3 abci/types/application.go
33. +1 -1 abci/types/messages_test.go
34. +10 -0 abci/types/result.go
35. +1505 -1446 abci/types/types.pb.go
36. +1 -1 cmd/tendermint/commands/reindex_event.go
37. +2 -2 cmd/tendermint/commands/reindex_event_test.go
38. +76 -3 cmd/tendermint/commands/reset_priv_validator.go
39. +1 -0 cmd/tendermint/main.go
40. +16 -16 docs/app-dev/abci-cli.md
41. +95 -0 docs/tendermint-core/consensus/proposer-based-timestamps.md
42. +1 -0 docs/versions
43. +7 -3 go.mod
44. +2 -2 go.sum
45. +25 -19 internal/blocksync/reactor.go
46. +10 -5 internal/blocksync/reactor_test.go
47. +165 -158 internal/consensus/byzantine_test.go
48. +20 -10 internal/consensus/common_test.go
49. +33 -10 internal/consensus/invalid_test.go
50. +11 -11 internal/consensus/mempool_test.go
51. +2 -6 internal/consensus/reactor.go
52. +9 -10 internal/consensus/reactor_test.go
53. +21 -20 internal/consensus/replay.go
54. +26 -20 internal/consensus/replay_file.go
55. +4 -15 internal/consensus/replay_stubs.go
56. +46 -87 internal/consensus/replay_test.go
57. +39 -19 internal/consensus/state.go
58. +73 -0 internal/consensus/state_test.go
59. +30 -23 internal/consensus/wal_generator.go
60. +9 -10 internal/consensus/wal_test.go
61. +0 -32 internal/eventbus/event_bus.go
62. +2 -2 internal/eventbus/event_bus_test.go
63. +33 -35 internal/evidence/pool.go
64. +53 -48 internal/evidence/pool_test.go
65. +5 -4 internal/evidence/reactor_test.go
66. +34 -26 internal/evidence/verify_test.go
67. +1 -1 internal/inspect/inspect_test.go
68. +13 -0 internal/libs/autofile/group.go
69. +1 -1 internal/libs/queue/queue_test.go
70. +6 -8 internal/mempool/mempool.go
71. +19 -19 internal/mempool/mempool_test.go
72. +1 -1 internal/mempool/mock/mempool.go
73. +3 -3 internal/mempool/reactor_test.go
74. +1 -1 internal/mempool/types.go
75. +8 -8 internal/p2p/conn/secret_connection_test.go
76. +0 -249 internal/proxy/app_conn.go
77. +182 -11 internal/proxy/client.go
78. +88 -18 internal/proxy/client_test.go
79. +0 -131 internal/proxy/multi_app_conn.go
80. +0 -99 internal/proxy/multi_app_conn_test.go
81. +0 -20 internal/pubsub/pubsub.go
82. +1 -1 internal/pubsub/query/syntax/syntax_test.go
83. +2 -2 internal/rpc/core/abci.go
84. +5 -7 internal/rpc/core/blocks.go
85. +2 -2 internal/rpc/core/blocks_test.go
86. +9 -11 internal/rpc/core/env.go
87. +5 -5 internal/rpc/core/mempool.go
88. +2 -2 internal/rpc/core/net.go
89. +1 -1 internal/rpc/core/status.go
90. +76 -107 internal/state/execution.go
91. +88 -84 internal/state/execution_test.go
92. +0 -24 internal/state/export_test.go
93. +9 -21 internal/state/helpers_test.go
94. +10 -11 internal/state/indexer/block/kv/kv.go
95. +6 -6 internal/state/indexer/block/kv/kv_test.go
96. +3 -3 internal/state/indexer/indexer.go
97. +2 -2 internal/state/indexer/indexer_service_test.go
98. +1 -1 internal/state/indexer/sink/kv/kv_test.go
99. +2 -22 internal/state/indexer/sink/psql/psql_test.go
100. +1 -1 internal/state/indexer/tx/kv/kv_bench_test.go

.github/workflows/build.yml (+3 -3)

@ -23,7 +23,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -44,7 +44,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -66,7 +66,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


.github/workflows/docker.yml (+2 -2)

@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@ -43,7 +43,7 @@ jobs:
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.13.0
uses: docker/login-action@v1.14.1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}


.github/workflows/e2e-manual.yml (+1 -1)

@ -19,7 +19,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


.github/workflows/e2e-nightly-34x.yml (+1 -1)

@ -24,7 +24,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'


.github/workflows/e2e-nightly-35x.yml (+1 -1)

@ -24,7 +24,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
with:
ref: 'v0.35.x'


.github/workflows/e2e-nightly-master.yml (+1 -1)

@ -23,7 +23,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


.github/workflows/e2e.yml (+1 -1)

@ -17,7 +17,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


.github/workflows/fuzz-nightly.yml (+1 -1)

@ -17,7 +17,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Install go-fuzz
working-directory: test/fuzz


.github/workflows/jepsen.yml (+1 -1)

@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
with:
repository: 'tendermint/jepsen'


.github/workflows/linkchecker.yml (+1 -1)

@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"

.github/workflows/lint.yml (+6 -3)

@ -13,17 +13,20 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: actions/setup-go@v2
with:
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.2
- uses: golangci/golangci-lint-action@v3.1.0
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.42.1
version: v1.44
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF

.github/workflows/linter.yml (+1 -1)

@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v4
env:


.github/workflows/markdown-links.yml (+1 -1)

@ -11,7 +11,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
check-modified-files-only: 'yes'

.github/workflows/proto-check.yml (+2 -2)

@ -12,13 +12,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci

.github/workflows/proto-dockerfile.yml (+2 -2)

@ -28,7 +28,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Check out and assign tags
id: prep
run: |
@ -49,7 +49,7 @@ jobs:
uses: docker/setup-buildx-action@v1.6.0
- name: Log in to the container registry
uses: docker/login-action@v1.13.0
uses: docker/login-action@v1.14.1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}


.github/workflows/release.yml (+1 -1)

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
with:
fetch-depth: 0


.github/workflows/tests.yml (+2 -2)

@ -19,7 +19,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
@ -41,7 +41,7 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |


CHANGELOG.md (+21 -0)

@ -2,6 +2,27 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.2
February 28, 2022
Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
### IMPROVEMENTS
- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
### BUG FIXES
- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
## v0.35.1
January 26, 2022


CHANGELOG_PENDING.md (+1 -0)

@ -19,6 +19,7 @@ Special thanks to external contributors on this release:
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
- [cli] \#8081 make the reset command safe to use. (@marbar3778)
- Apps


Makefile (+8 -3)

@ -92,6 +92,12 @@ proto-gen:
@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
.PHONY: proto-gen
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
proto-lint:
@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
.PHONY: proto-lint
@ -222,9 +228,7 @@ build-docs:
mkdir -p ~/output/$${path_prefix} ; \
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
done < versions ; \
mkdir -p ~/output/master ; \
cp -r .vuepress/dist/* ~/output/master/
done < versions ;
.PHONY: build-docs
###############################################################################
@ -331,3 +335,4 @@ split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out

abci/client/creators.go (+0 -33)

@ -1,33 +0,0 @@
package abciclient
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
// Creator creates new ABCI clients.
type Creator func(log.Logger) (Client, error)
// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
return func(logger log.Logger) (Client, error) {
return NewLocalClient(logger, app), nil
}
}
// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
return func(log.Logger) (Client, error) {
remoteApp, err := NewClient(logger, addr, transport, mustConnect)
if err != nil {
return nil, fmt.Errorf("failed to connect to proxy: %w", err)
}
return remoteApp, nil
}
}

abci/client/socket_client_test.go (+0 -85)

@ -1,85 +0,0 @@
package abciclient_test
import (
"context"
"fmt"
"testing"
"time"
"math/rand"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
func TestProperSyncCalls(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := slowApp{}
logger := log.NewNopLogger()
_, c := setupClientServer(ctx, t, logger, app)
resp := make(chan error, 1)
go func() {
rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
assert.NoError(t, c.Flush(ctx))
assert.NotNil(t, rsp)
select {
case <-ctx.Done():
case resp <- c.Error():
}
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.NoError(t, err, "This should return success")
}
}
func setupClientServer(
ctx context.Context,
t *testing.T,
logger log.Logger,
app types.Application,
) (service.Service, abciclient.Client) {
t.Helper()
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(logger, addr, "socket", app)
require.NoError(t, err)
require.NoError(t, s.Start(ctx))
t.Cleanup(s.Wait)
c := abciclient.NewSocketClient(logger, addr, true)
require.NoError(t, c.Start(ctx))
t.Cleanup(c.Wait)
require.True(t, s.IsRunning())
require.True(t, c.IsRunning())
return s, c
}
type slowApp struct {
types.BaseApplication
}
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseFinalizeBlock{}
}

abci/cmd/abci-cli/abci-cli.go (+20 -17)

@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
cmd.AddCommand(consoleCmd)
cmd.AddCommand(echoCmd)
cmd.AddCommand(infoCmd)
cmd.AddCommand(deliverTxCmd)
cmd.AddCommand(finalizeBlockCmd)
cmd.AddCommand(checkTxCmd)
cmd.AddCommand(commitCmd)
cmd.AddCommand(versionCmd)
@ -150,10 +150,9 @@ where example.file looks something like:
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01 0x04 0xff
info
`,
Args: cobra.ExactArgs(0),
@ -169,7 +168,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
RunE: cmdConsole,
}
@ -188,11 +187,11 @@ var infoCmd = &cobra.Command{
RunE: cmdInfo,
}
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
var finalizeBlockCmd = &cobra.Command{
Use: "finalize_block",
Short: "deliver a block of transactions to the application",
Long: "deliver a block of transactions to the application",
Args: cobra.MinimumNArgs(1),
RunE: cmdFinalizeBlock,
}
@ -426,7 +425,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdCheckTx(cmd, actualArgs)
case "commit":
return cmdCommit(cmd, actualArgs)
case "deliver_tx":
case "finalize_block":
return cmdFinalizeBlock(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
@ -500,19 +499,23 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want the tx",
Log: "Must provide at least one transaction",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
txs := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txs[i] = txBytes
}
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
if err != nil {
return err
}
for _, tx := range res.Txs {
for _, tx := range res.TxResults {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,


abci/example/example_test.go (+4 -4)

@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
// Send bulk request
res, err := client.FinalizeBlock(ctx, rfb)
require.NoError(t, err)
require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
for _, tx := range res.Txs {
require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
for _, tx := range res.TxResults {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
// Send request
response, err := client.FinalizeBlock(ctx, &rfb)
require.NoError(t, err, "Error in GRPC FinalizeBlock")
require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.Txs {
require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.TxResults {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
}

abci/example/kvstore/kvstore.go (+14 -14)

@ -117,7 +117,7 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(tx) {
@ -156,7 +156,7 @@ func (app *Application) handleTx(tx []byte) *types.ResponseDeliverTx {
},
}
return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
}
func (app *Application) Close() error {
@ -190,12 +190,12 @@ func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.Resp
}
}
respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
respTxs := make([]*types.ExecTxResult, len(req.Txs))
for i, tx := range req.Txs {
respTxs[i] = app.handleTx(tx)
}
return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
}
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
@ -338,13 +338,13 @@ func isValidatorTx(tx []byte) bool {
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
@ -353,7 +353,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
@ -361,7 +361,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
@ -371,7 +371,7 @@ func (app *Application) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
}
// add, update, or remove a validator
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
@ -386,7 +386,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
}
if !hasKey {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
@ -398,7 +398,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return &types.ResponseDeliverTx{
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
@ -411,7 +411,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) *types.Response
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
return &types.ExecTxResult{Code: code.CodeTypeOK}
}
// -----------------------------
@ -425,9 +425,9 @@ func isPrepareTx(tx []byte) bool {
// execPrepareTx is noop. tx data is considered as placeholder
// and is substitute at the PrepareProposal.
func (app *Application) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
// noop
return &types.ResponseDeliverTx{}
return &types.ExecTxResult{}
}
// substPrepareTx subst all the preparetx in the blockdata


abci/example/kvstore/kvstore_test.go (+9 -10)

@ -27,12 +27,12 @@ const (
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating tx doesn't raise error
ar = app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// commit
app.Commit()
@ -107,7 +107,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@ -196,7 +196,6 @@ func makeApplyBlock(
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Height: height,
Txs: txs,
})
@ -326,13 +325,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating FinalizeBlock doesn't raise error
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// commit
_, err = app.Commit(ctx)
require.NoError(t, err)


abci/tests/server/client.go (+1 -1)

@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
for i, tx := range res.Txs {
for i, tx := range res.TxResults {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp[i] {
fmt.Println("Failed test: FinalizeBlock")


abci/tests/test_cli/ex1.abci (+2 -2)

@ -1,10 +1,10 @@
echo hello
info
commit
deliver_tx "abc"
finalize_block "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
finalize_block "def=xyz" "ghi=123"
commit
query "def"

abci/tests/test_cli/ex1.abci.out (+5 -3)

@ -12,7 +12,7 @@
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
> finalize_block "abc"
-> code: OK
> info
@ -33,12 +33,14 @@
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> commit
-> code: OK
-> data.hex: 0x0400000000000000
-> data.hex: 0x0600000000000000
> query "def"
-> code: OK


abci/tests/test_cli/ex2.abci (+3 -3)

@ -1,7 +1,7 @@
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01
finalize_block 0x04
info

abci/tests/test_cli/ex2.abci.out (+5 -5)

@ -4,20 +4,20 @@
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
> finalize_block 0x00
-> code: OK
> check_tx 0x00
-> code: OK
> deliver_tx 0x01
> finalize_block 0x01
-> code: OK
> deliver_tx 0x04
> finalize_block 0x04
-> code: OK
> info
-> code: OK
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
-> data: {"size":3}
-> data.hex: 0x7B2273697A65223A337D

abci/types/application.go (+3 -3)

@ -103,12 +103,12 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
txs := make([]*ResponseDeliverTx, len(req.Txs))
txs := make([]*ExecTxResult, len(req.Txs))
for i := range req.Txs {
txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
txs[i] = &ExecTxResult{Code: CodeTypeOK}
}
return ResponseFinalizeBlock{
Txs: txs,
TxResults: txs,
}
}


abci/types/messages_test.go (+1 -1)

@ -13,7 +13,7 @@ import (
)
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
b, err := json.Marshal(&ExecTxResult{Code: 1})
assert.NoError(t, err)
// include empty fields.
assert.True(t, strings.Contains(string(b), "code"))


abci/types/result.go (+10 -0)

@ -33,6 +33,16 @@ func (r ResponseDeliverTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
return r.Code == CodeTypeOK


abci/types/types.pb.go (+1505 -1446)
File diff suppressed because it is too large


cmd/tendermint/commands/reindex_event.go (+1 -1)

@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.FinalizeBlock.Txs[i]),
Result: *(r.FinalizeBlock.TxResults[i]),
}
_ = batch.Add(&tr)


cmd/tendermint/commands/reindex_event_test.go (+2 -2)

@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) {
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
dtx := abcitypes.ExecTxResult{}
abciResp := &prototmstate.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{&dtx},
TxResults: []*abcitypes.ExecTxResult{&dtx},
},
}


cmd/tendermint/commands/reset_priv_validator.go (+76 -3)

@ -2,6 +2,7 @@ package commands
import (
"os"
"path/filepath"
"github.com/spf13/cobra"
@ -31,6 +32,20 @@ func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command
return cmd
}
// MakeResetStateCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
return &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
return resetState(conf.DBDir(), logger, keyType)
},
}
}
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
@ -55,18 +70,76 @@ func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *c
// it's only suitable for testnets.
// resetAll removes address book files plus all data, and resets the privValidator data.
// Exported so other CLI tools can use it.
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
// recreate the dbDir since the privVal state needs to live there
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}
// resetState removes address book files plus all databases.
func resetState(dbDir string, logger log.Logger, keyType string) error {
blockdb := filepath.Join(dbDir, "blockstore.db")
state := filepath.Join(dbDir, "state.db")
wal := filepath.Join(dbDir, "cs.wal")
evidence := filepath.Join(dbDir, "evidence.db")
txIndex := filepath.Join(dbDir, "tx_index.db")
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(blockdb) {
if err := os.RemoveAll(blockdb); err == nil {
logger.Info("Removed all blockstore.db", "dir", blockdb)
} else {
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
}
}
if tmos.FileExists(state) {
if err := os.RemoveAll(state); err == nil {
logger.Info("Removed all state.db", "dir", state)
} else {
logger.Error("error removing all state.db", "dir", state, "err", err)
}
}
if tmos.FileExists(wal) {
if err := os.RemoveAll(wal); err == nil {
logger.Info("Removed all cs.wal", "dir", wal)
} else {
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
}
}
if tmos.FileExists(evidence) {
if err := os.RemoveAll(evidence); err == nil {
logger.Info("Removed all evidence.db", "dir", evidence)
} else {
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
}
}
if tmos.FileExists(txIndex) {
if err := os.RemoveAll(txIndex); err == nil {
logger.Info("Removed tx_index.db", "dir", txIndex)
} else {
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
}
}
if tmos.FileExists(peerstore) {
if err := os.RemoveAll(peerstore); err == nil {
logger.Info("Removed peerstore.db", "dir", peerstore)
} else {
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
}
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
return nil
}
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {


cmd/tendermint/main.go (+1 -0)

@ -34,6 +34,7 @@ func main() {
commands.MakeReplayCommand(conf, logger),
commands.MakeReplayConsoleCommand(conf, logger),
commands.MakeResetAllCommand(conf, logger),
commands.MakeResetStateCommand(conf, logger),
commands.MakeResetPrivateValidatorCommand(conf, logger),
commands.MakeShowValidatorCommand(conf, logger),
commands.MakeTestnetFilesCommand(conf, logger),


docs/app-dev/abci-cli.md (+16 -16)

@ -27,17 +27,17 @@ Usage:
abci-cli [command]
Available Commands:
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
finalize_block Send a set of transactions to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
Flags:
--abci string socket or grpc (default "socket")
@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command.
The `abci-cli` tool lets us send ABCI messages to our application, to
help build and debug them.
The most important messages are `deliver_tx`, `check_tx`, and `commit`,
The most important messages are `finalize_block`, `check_tx`, and `commit`,
but there are others for convenience, configuration, and information
purposes.
@ -173,7 +173,7 @@ Try running these commands:
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
> finalize_block "abc"
-> code: OK
> info
@ -192,7 +192,7 @@ Try running these commands:
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
> finalize_block "def=xyz"
-> code: OK
> commit
@ -207,8 +207,8 @@ Try running these commands:
-> value.hex: 78797A
```
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if
we do `finalize_block "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.


docs/tendermint-core/consensus/proposer-based-timestamps.md (+95 -0)

@ -0,0 +1,95 @@
---
order: 3
---
# PBTS
This document provides an overview of the Proposer-Based Timestamp (PBTS)
algorithm added to Tendermint in the v0.36 release. It outlines the core
functionality as well as the parameters and constraints of this algorithm.
## Algorithm Overview
The PBTS algorithm defines a way for a Tendermint blockchain to create block
timestamps that are within a reasonable bound of the clocks of the validators on
the network. This replaces the original BFTTime algorithm for timestamp
assignment that relied on the timestamps included in precommit messages.
## Algorithm Parameters
The functionality of the PBTS algorithm is governed by two parameters within
Tendermint. These two parameters are [consensus
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
meaning they are configured by the ABCI application and are expected to be the
same across all nodes on the network.
### `Precision`
The `Precision` parameter configures the acceptable upper-bound of clock drift
among all of the nodes on a Tendermint network. Any two nodes on a Tendermint
network are expected to have clocks that differ by at most `Precision`
milliseconds at any given instant.
### `MessageDelay`
The `MessageDelay` parameter configures the acceptable upper-bound for
transmitting a `Proposal` message from the proposer to _all_ of the validators
on the network.
Networks should choose as small a value for `MessageDelay` as is practical,
provided it is large enough that messages can reach all participants with high
probability given the number of participants and latency of their connections.
## Algorithm Concepts
### Block timestamps
Each block produced by the Tendermint consensus engine contains a timestamp.
The timestamp produced in each block is a meaningful representation of time that is
useful for the protocols and applications built on top of Tendermint.
The following protocols and application features require a reliable source of time:
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet
delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)
### Proposer Selects a Block Timestamp
When the proposer node creates a new block proposal, the node reads the time
from its local clock and uses this reading as the timestamp for the proposed
block.
### Timeliness
When each validator on a Tendermint network receives a proposed block, it
performs a series of checks to ensure that the block can be considered valid as
a candidate to be the next block in the chain.
The PBTS algorithm performs a validity check on the timestamp of proposed
blocks. When a validator receives a proposal it ensures that the timestamp in
the proposal is within a bound of the validator's local clock. Specifically, the
algorithm checks that the timestamp is no more than `Precision` ahead of the
node's local clock and no more than `Precision` + `MessageDelay` behind the
node's local clock. This creates a range of acceptable timestamps around the
node's local time. If the timestamp is within this range, the PBTS algorithm
considers the block **timely**. If a block is not **timely**, the node will
issue a `nil` `prevote` for this block, signaling to the rest of the network
that the node does not consider the block to be valid.
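To make the bound concrete, the sketch below shows the timeliness check in Go. This is a minimal illustration written for this overview, not code added by this commit; the `isTimely` helper and the parameter values used in `main` are assumptions chosen for demonstration.
```go
package main

import (
	"fmt"
	"time"
)

// isTimely reports whether a proposed block timestamp falls within the
// acceptable window around the receiving node's local clock, given the
// PBTS consensus parameters Precision and MessageDelay.
func isTimely(proposalTime, localTime time.Time, precision, messageDelay time.Duration) bool {
	// The timestamp may be at most Precision ahead of the local clock...
	upperBound := localTime.Add(precision)
	// ...and at most Precision + MessageDelay behind it.
	lowerBound := localTime.Add(-(precision + messageDelay))
	return !proposalTime.Before(lowerBound) && !proposalTime.After(upperBound)
}

func main() {
	now := time.Now()
	precision := 500 * time.Millisecond // illustrative values only
	messageDelay := 2 * time.Second

	// Slightly ahead of the local clock, but within Precision: timely.
	fmt.Println(isTimely(now.Add(200*time.Millisecond), now, precision, messageDelay)) // true
	// Further behind than Precision + MessageDelay: not timely.
	fmt.Println(isTimely(now.Add(-5*time.Second), now, precision, messageDelay)) // false
}
```
A validator whose check fails in this way prevotes `nil` for the proposal, as described above.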
### Clock Synchronization
The PBTS algorithm requires that the clocks of the validators on a Tendermint
network be within `Precision` of each other. In practice, this means that validators
should periodically synchronize to a reliable NTP server. Validators that drift
too far away from the rest of the network will no longer propose blocks with
valid timestamps. Additionally, they will not consider the timestamps of blocks
proposed by their peers to be valid.
## See Also
* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md)
contains all of the details of the algorithm.

docs/versions (+1 -0)

@ -1,3 +1,4 @@
master master
v0.33.x v0.33
v0.34.x v0.34
v0.35.x v0.35

go.mod (+7 -3)

@ -34,11 +34,17 @@ require (
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.44.0
google.golang.org/grpc v1.45.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
)
require (
github.com/creachadair/atomicfile v0.2.4
github.com/google/go-cmp v0.5.7
gotest.tools v2.2.0+incompatible
)
require (
4d63.com/gochecknoglobals v0.1.0 // indirect
github.com/Antonboom/errname v0.1.5 // indirect
@ -67,7 +73,6 @@ require (
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
github.com/containerd/continuity v0.2.1 // indirect
github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48 // indirect
github.com/creachadair/atomicfile v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingajkin/go-header v0.4.2 // indirect
github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
@ -107,7 +112,6 @@ require (
github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect


go.sum (+2 -2)

@ -1625,8 +1625,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


internal/blocksync/reactor.go (+25 -19)

@ -70,6 +70,8 @@ type Reactor struct {
// immutable
initialState sm.State
// store
stateStore sm.Store
blockExec *sm.BlockExecutor
store *store.BlockStore
@ -101,7 +103,7 @@ type Reactor struct {
func NewReactor(
ctx context.Context,
logger log.Logger,
state sm.State,
stateStore sm.Store,
blockExec *sm.BlockExecutor,
store *store.BlockStore,
consReactor consensusReactor,
@ -111,19 +113,6 @@ func NewReactor(
metrics *consensus.Metrics,
eventBus *eventbus.EventBus,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
}
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor())
if err != nil {
return nil, err
@ -131,20 +120,16 @@ func NewReactor(
r := &Reactor{
logger: logger,
initialState: state,
stateStore: stateStore,
blockExec: blockExec,
store: store,
pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: newAtomicBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockSyncCh: blockSyncCh,
blockSyncOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
metrics: metrics,
eventBus: eventBus,
syncStartTime: time.Time{},
}
r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
@ -159,6 +144,27 @@ func NewReactor(
// If blockSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart(ctx context.Context) error {
state, err := r.stateStore.Load()
if err != nil {
return err
}
r.initialState = state
if state.LastBlockHeight != r.store.Height() {
return fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, r.store.Height())
}
startHeight := r.store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh)
r.requestsCh = requestsCh
r.errorsCh = errorsCh
if r.blockSync.IsSet() {
if err := r.pool.Start(ctx); err != nil {
return err


internal/blocksync/reactor_test.go (+10 -5)

@ -14,6 +14,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
@ -33,7 +34,7 @@ type reactorTestSuite struct {
nodes []types.NodeID
reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
app map[types.NodeID]abciclient.Client
blockSyncChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
@ -64,7 +65,7 @@ func setup(
network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
app: make(map[types.NodeID]abciclient.Client, numNodes),
blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
@ -109,7 +110,7 @@ func (rts *reactorTestSuite) addNode(
logger := log.TestingLogger()
rts.nodes = append(rts.nodes, nodeID)
rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics())
rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics())
require.NoError(t, rts.app[nodeID].Start(ctx))
blockDB := dbm.NewMemDB()
@ -121,13 +122,17 @@ func (rts *reactorTestSuite) addNode(
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
eventbus := eventbus.NewDefault(logger)
require.NoError(t, eventbus.Start(ctx))
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
rts.app[nodeID].Consensus(),
rts.app[nodeID],
mock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
eventbus,
)
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@ -176,7 +181,7 @@ func (rts *reactorTestSuite) addNode(
rts.reactors[nodeID], err = NewReactor(
ctx,
rts.logger.With("nodeID", nodeID),
state.Copy(),
stateStore,
blockExec,
blockStore,
nil,


internal/consensus/byzantine_test.go (+165 -158)

@ -82,37 +82,33 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
log.TestingLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
// Make a full instance of the evidence pool
evidenceDB := dbm.NewMemDB()
evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
require.NoError(t, err)
// set private validator
pv := privVals[i]
cs.SetPrivValidator(ctx, pv)
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
err = eventBus.Start(ctx)
require.NoError(t, err)
cs.SetEventBus(eventBus)
evpool.SetEventBus(eventBus)
cs.SetTimeoutTicker(tickerFunc())
states[i] = cs
}()
}
rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock
rts := setup(ctx, t, nValidators, states, 512) // buffer must be large enough to not deadlock
var bzNodeID types.NodeID
@ -238,8 +234,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}
for _, reactor := range rts.reactors {
state := reactor.state.GetState()
reactor.SwitchToConsensus(ctx, state, false)
reactor.SwitchToConsensus(ctx, reactor.state.GetState(), false)
}
// Evidence should be submitted and committed at the third height but
@ -248,20 +243,26 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
var wg sync.WaitGroup
i := 0
subctx, subcancel := context.WithCancel(ctx)
defer subcancel()
for _, sub := range rts.subs {
wg.Add(1)
go func(j int, s eventbus.Subscription) {
defer wg.Done()
for {
if ctx.Err() != nil {
if subctx.Err() != nil {
return
}
msg, err := s.Next(subctx)
if subctx.Err() != nil {
return
}
msg, err := s.Next(ctx)
assert.NoError(t, err)
if err != nil {
cancel()
t.Errorf("waiting for subscription: %v", err)
subcancel()
return
}
@ -273,12 +274,18 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}
}
}(i, sub)
i++
}
wg.Wait()
// don't run more assertions if we've encountered a timeout
select {
case <-subctx.Done():
t.Fatal("encountered timeout")
default:
}
pubkey, err := bzNodeState.privValidator.GetPubKey(ctx)
require.NoError(t, err)
@ -317,42 +324,42 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// blocksSubs := make([]types.Subscription, n)
// reactors := make([]p2p.Reactor, n)
// for i := 0; i < n; i++ {
// // enable txs so we can create different proposals
// assertMempool(states[i].txNotifier).EnableTxsAvailable()
// // enable txs so we can create different proposals
// assertMempool(states[i].txNotifier).EnableTxsAvailable()
// eventBus := states[i].eventBus
// eventBus.SetLogger(logger.With("module", "events", "validator", i))
// eventBus := states[i].eventBus
// eventBus.SetLogger(logger.With("module", "events", "validator", i))
// var err error
// blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock)
// require.NoError(t, err)
// var err error
// blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock)
// require.NoError(t, err)
// conR := NewReactor(states[i], true) // so we don't start the consensus states
// conR.SetLogger(logger.With("validator", i))
// conR.SetEventBus(eventBus)
// conR := NewReactor(states[i], true) // so we don't start the consensus states
// conR.SetLogger(logger.With("validator", i))
// conR.SetEventBus(eventBus)
// var conRI p2p.Reactor = conR
// var conRI p2p.Reactor = conR
// // make first val byzantine
// if i == 0 {
// conRI = NewByzantineReactor(conR)
// }
// // make first val byzantine
// if i == 0 {
// conRI = NewByzantineReactor(conR)
// }
// reactors[i] = conRI
// err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info
// require.NoError(t, err)
// reactors[i] = conRI
// err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info
// require.NoError(t, err)
// }
// switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch {
// sw.SetLogger(p2pLogger.With("validator", i))
// sw.AddReactor("CONSENSUS", reactors[i])
// return sw
// sw.SetLogger(p2pLogger.With("validator", i))
// sw.AddReactor("CONSENSUS", reactors[i])
// return sw
// }, func(sws []*p2p.Switch, i, j int) {
// // the network starts partitioned with globally active adversary
// if i != 0 {
// return
// }
// p2p.Connect2Switches(sws, i, j)
// // the network starts partitioned with globally active adversary
// if i != 0 {
// return
// }
// p2p.Connect2Switches(sws, i, j)
// })
// // make first val byzantine
@ -360,26 +367,26 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// // do any safety checks.
// states[0].privValidator.(types.MockPV).DisableChecks()
// states[0].decideProposal = func(j int32) func(int64, int32) {
// return func(height int64, round int32) {
// byzantineDecideProposalFunc(t, height, round, states[j], switches[j])
// }
// return func(height int64, round int32) {
// byzantineDecideProposalFunc(t, height, round, states[j], switches[j])
// }
// }(int32(0))
// // We are setting the prevote function to do nothing because the prevoting
// // and precommitting are done alongside the proposal.
// states[0].doPrevote = func(height int64, round int32) {}
// defer func() {
// for _, sw := range switches {
// err := sw.Stop()
// require.NoError(t, err)
// }
// for _, sw := range switches {
// err := sw.Stop()
// require.NoError(t, err)
// }
// }()
// // start the non-byz state machines.
// // note these must be started before the byz
// for i := 1; i < n; i++ {
// cr := reactors[i].(*Reactor)
// cr.SwitchToConsensus(cr.conS.GetState(), false)
// cr := reactors[i].(*Reactor)
// cr.SwitchToConsensus(cr.conS.GetState(), false)
// }
// // start the byzantine state machine
@ -411,146 +418,146 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// // (one of them already has)
// wg := new(sync.WaitGroup)
// for i := 1; i < N-1; i++ {
// wg.Add(1)
// go func(j int) {
// <-blocksSubs[j].Out()
// wg.Done()
// }(i)
// wg.Add(1)
// go func(j int) {
// <-blocksSubs[j].Out()
// wg.Done()
// }(i)
// }
// done := make(chan struct{})
// go func() {
// wg.Wait()
// close(done)
// wg.Wait()
// close(done)
// }()
// tick := time.NewTicker(time.Second * 10)
// select {
// case <-done:
// case <-tick.C:
// for i, reactor := range reactors {
// t.Log(fmt.Sprintf("Consensus Reactor %v", i))
// t.Log(fmt.Sprintf("%v", reactor))
// }
// t.Fatalf("Timed out waiting for all validators to commit first block")
// }
}
// func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
// // byzantine user should create two proposals and try to split the vote.
// // Avoid sending on internalMsgQueue and running consensus state.
// // Create a new proposal block from state/txs from the mempool.
// block1, blockParts1 := cs.createProposalBlock()
// polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
// proposal1 := types.NewProposal(height, round, polRound, propBlockID)
// p1 := proposal1.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
// t.Error(err)
// }
// proposal1.Signature = p1.Signature
// // some new transactions come in (this ensures that the proposals are different)
// deliverTxsRange(cs, 0, 1)
// // Create a new proposal block from state/txs from the mempool.
// block2, blockParts2 := cs.createProposalBlock()
// polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
// proposal2 := types.NewProposal(height, round, polRound, propBlockID)
// p2 := proposal2.ToProto()
// if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
// t.Error(err)
// }
// proposal2.Signature = p2.Signature
// block1Hash := block1.Hash()
// block2Hash := block2.Hash()
// // broadcast conflicting proposals/block parts to peers
// peers := sw.Peers().List()
// t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
// for i, peer := range peers {
// if i < len(peers)/2 {
// go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
// } else {
// go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
// }
// }
// }
// func sendProposalAndParts(
// height int64,
// round int32,
// cs *State,
// peer p2p.Peer,
// proposal *types.Proposal,
// blockHash []byte,
// parts *types.PartSet,
// ) {
// // proposal
// msg := &ProposalMessage{Proposal: proposal}
// peer.Send(DataChannel, MustEncode(msg))
// // parts
// for i := 0; i < int(parts.Total()); i++ {
// part := parts.GetPart(i)
// msg := &BlockPartMessage{
// Height: height, // This tells peer that this part applies to us.
// Round: round, // This tells peer that this part applies to us.
// Part: part,
// }
// peer.Send(DataChannel, MustEncode(msg))
// }
// // votes
// cs.mtx.Lock()
// prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
// precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
// cs.mtx.Unlock()
// peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
// peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
// }
// type ByzantineReactor struct {
// service.Service
// reactor *Reactor
// }
// func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
// return &ByzantineReactor{
// Service: conR,
// reactor: conR,
// }
// }
// func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
// func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
// func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
// if !br.reactor.IsRunning() {
// return
// }
// // Create peerState for peer
// peerState := NewPeerState(peer).SetLogger(br.reactor.logger)
// peer.Set(types.PeerStateKey, peerState)
// // Send our state to peer.
// // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
// if !br.reactor.waitSync {
// br.reactor.sendNewRoundStepMessage(peer)
// }
// }
// func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// br.reactor.RemovePeer(peer, reason)
// }
// func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
// br.reactor.Receive(chID, peer, msgBytes)
// }
// func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

+ 20
- 10
internal/consensus/common_test.go

@ -370,7 +370,11 @@ func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte)
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
select {
case <-ctx.Done():
return ctx.Err()
case ch <- msg:
}
}
return nil
}, types.EventQueryVote); err != nil {
@ -401,7 +405,10 @@ func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
select {
case <-ctx.Done():
case ch <- msg:
}
}
}
}()
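Note: the two hunks above replace a bare channel send (ch <- msg) with a select on the test context, so a subscriber goroutine can no longer block forever once the test is torn down. A minimal standalone sketch of that pattern; every name here is illustrative, not taken from the codebase:

package main

import (
    "context"
    "fmt"
    "time"
)

// send delivers v on out unless ctx is canceled first, so a stalled or
// absent reader can no longer wedge the sending goroutine.
func send(ctx context.Context, out chan<- int, v int) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case out <- v:
        return nil
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()

    blocked := make(chan int) // no reader: send still returns once ctx expires
    fmt.Println(send(ctx, blocked, 42))
}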
@ -462,7 +469,6 @@ func newStateWithConfigAndBlockStore(
logger.With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
@ -476,22 +482,26 @@ func newStateWithConfigAndBlockStore(
stateStore := sm.NewStore(stateDB)
require.NoError(t, stateStore.Save(state))
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx,
eventBus := eventbus.NewDefault(logger.With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx,
logger.With("module", "consensus"),
thisConfig.Consensus,
state,
stateStore,
blockExec,
blockStore,
mempool,
evpool,
eventBus,
)
cs.SetPrivValidator(ctx, pv)
if err != nil {
t.Fatal(err)
}
eventBus := eventbus.NewDefault(logger.With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
cs.SetPrivValidator(ctx, pv)
cs.SetEventBus(eventBus)
return cs
}
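Note: because removed and added lines are interleaved above, the net wiring of the rewritten helper is easier to read in one piece. A condensed sketch assuming the same variables as newStateWithConfigAndBlockStore; this is a reading aid, not new behavior:

eventBus := eventbus.NewDefault(logger.With("module", "events"))
require.NoError(t, eventBus.Start(ctx))

blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus)

cs, err := NewState(ctx,
    logger.With("module", "consensus"),
    thisConfig.Consensus,
    stateStore, // NewState now takes the state store, not an sm.State value
    blockExec,
    blockStore,
    mempool,
    evpool,
    eventBus, // and the event bus; SetEventBus is gone
)
if err != nil {
    t.Fatal(err)
}
cs.SetPrivValidator(ctx, pv)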


+ 33
- 10
internal/consensus/invalid_test.go

@ -5,6 +5,7 @@ import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -20,7 +21,7 @@ import (
)
func TestReactorInvalidPrecommit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
config := configSetup(t)
@ -49,14 +50,14 @@ func TestReactorInvalidPrecommit(t *testing.T) {
byzState := rts.states[node.NodeID]
byzReactor := rts.reactors[node.NodeID]
calledDoPrevote := false
signal := make(chan struct{})
// Update the doPrevote function to just send a valid precommit for a random
// block and otherwise disable the priv validator.
byzState.mtx.Lock()
privVal := byzState.privValidator
byzState.doPrevote = func(ctx context.Context, height int64, round int32) {
defer close(signal)
invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal)
calledDoPrevote = true
}
byzState.mtx.Unlock()
@ -72,16 +73,30 @@ func TestReactorInvalidPrecommit(t *testing.T) {
go func(s eventbus.Subscription) {
defer wg.Done()
_, err := s.Next(ctx)
if ctx.Err() != nil {
return
}
if !assert.NoError(t, err) {
cancel() // cancel other subscribers on failure
}
}(sub)
}
}
wait := make(chan struct{})
go func() { defer close(wait); wg.Wait() }()
wg.Wait()
if !calledDoPrevote {
t.Fatal("test failed to run core logic")
select {
case <-wait:
if _, ok := <-signal; !ok {
t.Fatal("test condition did not fire")
}
case <-ctx.Done():
if _, ok := <-signal; !ok {
t.Fatal("test condition did not fire after timeout")
return
}
case <-signal:
// test passed
}
}
@ -130,19 +145,27 @@ func invalidDoPrevoteFunc(
cs.privValidator = nil // disable priv val so we don't do normal votes
cs.mtx.Unlock()
count := 0
r.mtx.Lock()
ids := make([]types.NodeID, 0, len(r.peers))
for _, ps := range r.peers {
ids = append(ids, ps.peerID)
}
r.mtx.Unlock()
count := 0
for _, peerID := range ids {
count++
err := r.voteCh.Send(ctx, p2p.Envelope{
To: ps.peerID,
To: peerID,
Message: &tmcons.Vote{
Vote: precommit.ToProto(),
},
})
// we want to have sent some of these votes,
// but if the test completes without erroring
// and we get here, we shouldn't error
if errors.Is(err, context.Canceled) && count > 1 {
// or not sending any messages, then we should
// error.
if errors.Is(err, context.Canceled) && count > 0 {
break
}
require.NoError(t, err)
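Note: the boolean flag calledDoPrevote is replaced by a channel that the hook closes, plus a context with a timeout, so the test can wait for the hook without racing on a plain variable. The pattern in isolation, with illustrative names only:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    signal := make(chan struct{})
    go func() {
        defer close(signal) // close, don't send: every waiter is released exactly once
        // ... work whose completion the test wants to observe ...
    }()

    select {
    case <-signal:
        fmt.Println("hook fired")
    case <-ctx.Done():
        fmt.Println("timed out waiting for hook:", ctx.Err())
    }
}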


+ 11
- 11
internal/consensus/mempool_test.go

@ -51,7 +51,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh)
deliverTxsRange(ctx, t, cs, 0, 1)
checkTxsRange(ctx, t, cs, 0, 1)
ensureNewEventOnChannel(t, newBlockCh) // commit txs
ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(t, newBlockCh)
@ -118,7 +118,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
round = 0
ensureNewRound(t, newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
checkTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round
ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
round++ // moving to the next round
@ -126,7 +126,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block
}
func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
t.Helper()
// Deliver some txs.
for i := start; i < end; i++ {
@ -159,7 +159,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader)
const numTxs int64 = 3000
go deliverTxsRange(ctx, t, cs, 0, int(numTxs))
go checkTxsRange(ctx, t, cs, 0, int(numTxs))
startTestRound(ctx, cs, cs.Height, cs.Round)
for n := int64(0); n < numTxs; {
@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resFinalize := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. got %v", resFinalize))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@ -212,7 +212,7 @@ func TestMempoolRmBadTx(t *testing.T) {
checkTxRespCh <- struct{}{}
}, mempool.TxInfo{})
if err != nil {
t.Errorf("error after CheckTx: %w", err)
t.Errorf("error after CheckTx: %v", err)
return
}
@ -265,20 +265,20 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
}
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
respTxs := make([]*abci.ExecTxResult, len(req.Txs))
for i, tx := range req.Txs {
txValue := txAsUint64(tx)
if txValue != uint64(app.txCount) {
respTxs[i] = &abci.ResponseDeliverTx{
respTxs[i] = &abci.ExecTxResult{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue),
}
continue
}
app.txCount++
respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
}
return abci.ResponseFinalizeBlock{Txs: respTxs}
return abci.ResponseFinalizeBlock{TxResults: respTxs}
}
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
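Note: the rename threaded through this file is easier to see un-interleaved: per-transaction results are now abci.ExecTxResult values returned in the TxResults field of ResponseFinalizeBlock (formerly ResponseDeliverTx values in Txs). The net form of the test application's handler above:

func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
    respTxs := make([]*abci.ExecTxResult, len(req.Txs))
    for i, tx := range req.Txs {
        txValue := txAsUint64(tx)
        if txValue != uint64(app.txCount) {
            respTxs[i] = &abci.ExecTxResult{
                Code: code.CodeTypeBadNonce,
                Log:  fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue),
            }
            continue
        }
        app.txCount++
        respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
    }
    return abci.ResponseFinalizeBlock{TxResults: respTxs}
}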


+ 2
- 6
internal/consensus/reactor.go

@ -138,6 +138,7 @@ func NewReactor(
cs *State,
channelCreator p2p.ChannelCreator,
peerUpdates *p2p.PeerUpdates,
eventBus *eventbus.EventBus,
waitSync bool,
metrics *Metrics,
) (*Reactor, error) {
@ -166,6 +167,7 @@ func NewReactor(
state: cs,
waitSync: waitSync,
peers: make(map[types.NodeID]*PeerState),
eventBus: eventBus,
Metrics: metrics,
stateCh: stateCh,
dataCh: dataCh,
@ -226,12 +228,6 @@ func (r *Reactor) OnStop() {
}
}
// SetEventBus sets the reactor's event bus.
func (r *Reactor) SetEventBus(b *eventbus.EventBus) {
r.eventBus = b
r.state.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (r *Reactor) WaitSync() bool {
r.mtx.RLock()
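Note: NewReactor now receives the event bus at construction time and SetEventBus is removed. A sketch of the new call, following the setup() call site in reactor_test.go below; the leading logger argument is an assumption, since the earlier parameters are not visible in that hunk:

reactor, err := NewReactor(
    logger,                       // assumed: leading arguments as at the call site
    state,
    chCreator(nodeID),
    node.MakePeerUpdates(ctx, t),
    state.eventBus,               // replaces the removed reactor.SetEventBus(...)
    true,                         // waitSync
    NopMetrics(),
)
require.NoError(t, err)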


+ 9
- 10
internal/consensus/reactor_test.go

@ -110,13 +110,12 @@ func setup(
state,
chCreator(nodeID),
node.MakePeerUpdates(ctx, t),
state.eventBus,
true,
NopMetrics(),
)
require.NoError(t, err)
reactor.SetEventBus(state.eventBus)
blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
ClientID: testSubscriber,
Query: types.EventQueryNewBlock,
@ -461,6 +460,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
require.NoError(t, err)
@ -483,7 +483,6 @@ func TestReactorWithEvidence(t *testing.T) {
log.TestingLogger().With("module", "mempool"),
thisConfig.Mempool,
proxyAppConnMem,
0,
)
if thisConfig.Consensus.WaitForTxs() {
@ -504,15 +503,15 @@ func TestReactorWithEvidence(t *testing.T) {
evpool2 := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetPrivValidator(ctx, pv)
eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
cs.SetEventBus(eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
cs, err := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus)
require.NoError(t, err)
cs.SetPrivValidator(ctx, pv)
cs.SetTimeoutTicker(tickerFunc())


+ 21
- 20
internal/consensus/replay.go

@ -10,6 +10,7 @@ import (
"reflect"
"time"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/internal/eventbus"
@ -204,7 +205,7 @@ type Handshaker struct {
stateStore sm.Store
initialState sm.State
store sm.BlockStore
eventBus types.BlockEventPublisher
eventBus *eventbus.EventBus
genDoc *types.GenesisDoc
logger log.Logger
@ -216,7 +217,7 @@ func NewHandshaker(
stateStore sm.Store,
state sm.State,
store sm.BlockStore,
eventBus types.BlockEventPublisher,
eventBus *eventbus.EventBus,
genDoc *types.GenesisDoc,
) *Handshaker {
@ -237,10 +238,10 @@ func (h *Handshaker) NBlocks() int {
}
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error {
func (h *Handshaker) Handshake(ctx context.Context, appClient abciclient.Client) error {
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo)
res, err := appClient.Info(ctx, proxy.RequestInfo)
if err != nil {
return fmt.Errorf("error calling Info: %w", err)
}
@ -264,7 +265,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err
}
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp)
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, appClient)
if err != nil {
return fmt.Errorf("error on replay: %w", err)
}
@ -285,7 +286,7 @@ func (h *Handshaker) ReplayBlocks(
state sm.State,
appHash []byte,
appBlockHeight int64,
proxyApp proxy.AppConns,
appClient abciclient.Client,
) ([]byte, error) {
storeBlockBase := h.store.Base()
storeBlockHeight := h.store.Height()
@ -316,7 +317,7 @@ func (h *Handshaker) ReplayBlocks(
Validators: nextVals,
AppStateBytes: h.genDoc.AppState,
}
res, err := proxyApp.Consensus().InitChain(ctx, req)
res, err := appClient.InitChain(ctx, req)
if err != nil {
return nil, err
}
@ -390,7 +391,7 @@ func (h *Handshaker) ReplayBlocks(
// Either the app is asking for replay, or we're all synced up.
if appBlockHeight < storeBlockHeight {
// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false)
return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, false)
} else if appBlockHeight == storeBlockHeight {
// We're good!
@ -405,7 +406,7 @@ func (h *Handshaker) ReplayBlocks(
case appBlockHeight < stateBlockHeight:
// the app is further behind than it should be, so replay blocks
// but leave the last block to go through the WAL
return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true)
return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, true)
case appBlockHeight == stateBlockHeight:
// We haven't run Commit (both the state and app are one block behind),
@ -413,7 +414,7 @@ func (h *Handshaker) ReplayBlocks(
// NOTE: We could instead use the cs.WAL on cs.Start,
// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
h.logger.Info("Replay last block using real app")
state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus())
state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient)
return state.AppHash, err
case appBlockHeight == storeBlockHeight:
@ -426,6 +427,9 @@ func (h *Handshaker) ReplayBlocks(
if err != nil {
return nil, err
}
if err := mockApp.Start(ctx); err != nil {
return nil, err
}
h.logger.Info("Replay last block using mock app")
state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp)
@ -445,7 +449,7 @@ func (h *Handshaker) ReplayBlocks(
func (h *Handshaker) replayBlocks(
ctx context.Context,
state sm.State,
proxyApp proxy.AppConns,
appClient abciclient.Client,
appBlockHeight,
storeBlockHeight int64,
mutateState bool) ([]byte, error) {
@ -480,17 +484,15 @@ func (h *Handshaker) replayBlocks(
if i == finalBlock && !mutateState {
// We emit events for the index services at the final block because of a sync issue that
// can occur when the node shuts down while committing the block.
blockExec := sm.NewBlockExecutor(
h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec.SetEventBus(h.eventBus)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
appHash, err = sm.ExecCommitBlock(ctx,
blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
blockExec, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
if err != nil {
return nil, err
}
} else {
appHash, err = sm.ExecCommitBlock(ctx,
nil, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
nil, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
if err != nil {
return nil, err
}
@ -501,7 +503,7 @@ func (h *Handshaker) replayBlocks(
if mutateState {
// sync the final block
state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus())
state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient)
if err != nil {
return nil, err
}
@ -517,15 +519,14 @@ func (h *Handshaker) replayBlock(
ctx context.Context,
state sm.State,
height int64,
proxyApp proxy.AppConnConsensus,
appClient abciclient.Client,
) (sm.State, error) {
block := h.store.LoadBlock(height)
meta := h.store.LoadBlockMeta(height)
// Use stubs for both mempool and evidence pool since no transactions nor
// evidence are needed here - block already exists.
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec.SetEventBus(h.eventBus)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
var err error
state, err = blockExec.ApplyBlock(ctx, state, meta.BlockID, block)
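Note: the handshaker now talks to a single abciclient.Client rather than a multi-connection proxy.AppConns. A condensed sketch of the new call pattern, with app, logger, stores, eventBus and genDoc assumed to be in scope; it mirrors the test usage further below:

client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
    return fmt.Errorf("starting proxy app: %w", err)
}

h := NewHandshaker(logger, stateStore, state, blockStore, eventBus, genDoc)
if err := h.Handshake(ctx, proxyApp); err != nil {
    return fmt.Errorf("abci handshake: %w", err)
}

res, err := proxyApp.Info(ctx, proxy.RequestInfo) // previously proxyApp.Query().Info(...)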


+ 26
- 20
internal/consensus/replay_file.go

@ -84,7 +84,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro
return err
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
pb := newPlayback(file, fp, cs, cs.stateStore)
defer pb.fp.Close()
var nextN int // apply N msgs in a row
@ -126,17 +126,17 @@ type playback struct {
count int // how many lines/msgs into the file are we
// replays can be reset to beginning
fileName string // so we can close/reopen the file
genesisState sm.State // so the replay session knows where to restart from
fileName string // so we can close/reopen the file
stateStore sm.Store
}
func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback {
func newPlayback(fileName string, fp *os.File, cs *State, store sm.Store) *playback {
return &playback{
cs: cs,
fp: fp,
fileName: fileName,
genesisState: genState,
dec: NewWALDecoder(fp),
cs: cs,
fp: fp,
fileName: fileName,
stateStore: store,
dec: NewWALDecoder(fp),
}
}
@ -145,9 +145,11 @@ func (pb *playback) replayReset(ctx context.Context, count int, newStepSub event
pb.cs.Stop()
pb.cs.Wait()
newCS := NewState(ctx, pb.cs.logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS.SetEventBus(pb.cs.eventBus)
newCS, err := NewState(ctx, pb.cs.logger, pb.cs.config, pb.stateStore, pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, pb.cs.eventBus)
if err != nil {
return err
}
newCS.startForReplay()
if err := pb.fp.Close(); err != nil {
@ -323,9 +325,12 @@ func newConsensusStateForReplay(
return nil, err
}
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
if err != nil {
return nil, err
}
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err = proxyApp.Start(ctx)
if err != nil {
return nil, fmt.Errorf("starting proxy app conns: %w", err)
@ -343,11 +348,12 @@ func newConsensusStateForReplay(
}
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(ctx, logger, csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus)
consensusState.SetEventBus(eventBus)
consensusState, err := NewState(ctx, logger, csConfig, stateStore, blockExec,
blockStore, mempool, evpool, eventBus)
if err != nil {
return nil, err
}
return consensusState, nil
}
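Note: the replay path now builds its ABCI client with proxy.ClientFactory and wraps it with proxy.New, replacing DefaultClientCreator and NewAppConns. Consolidated from the hunk above, with cfg and logger as in newConsensusStateForReplay:

client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
if err != nil {
    return nil, err
}
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
    return nil, fmt.Errorf("starting proxy app conns: %w", err)
}
// proxyApp is then handed straight to sm.NewBlockExecutor; there is no
// longer a separate proxyApp.Consensus() connection to select.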

+ 4
- 15
internal/consensus/replay_stubs.go

@ -32,7 +32,7 @@ func (emptyMempool) Update(
_ context.Context,
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ []*abci.ExecTxResult,
_ mempool.PreCheckFunc,
_ mempool.PostCheckFunc,
) error {
@ -61,22 +61,11 @@ func newMockProxyApp(
logger log.Logger,
appHash []byte,
abciResponses *tmstate.ABCIResponses,
) (proxy.AppConnConsensus, error) {
clientCreator := abciclient.NewLocalCreator(&mockProxyApp{
) (abciclient.Client, error) {
return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{
appHash: appHash,
abciResponses: abciResponses,
})
cli, err := clientCreator(logger)
if err != nil {
return nil, err
}
if err = cli.Start(ctx); err != nil {
return nil, err
}
return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()), nil
}), logger, proxy.NopMetrics()), nil
}
type mockProxyApp struct {


+ 46
- 87
internal/consensus/replay_test.go

@ -35,7 +35,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@ -652,61 +651,6 @@ func TestHandshakeReplayNone(t *testing.T) {
}
}
// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sim := setupSimulator(ctx, t) // setup config and simulator
cfg := sim.Config
assert.NotNil(t, cfg)
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
txCount := 0
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
require.NoError(t, err)
loadedAbciRes := new(tmstate.ABCIResponses)
// this also happens sm.LoadABCIResponses
err = proto.Unmarshal(bytes, loadedAbciRes)
require.NoError(t, err)
mock, err := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes)
require.NoError(t, err)
abciRes := new(tmstate.ABCIResponses)
abciRes.FinalizeBlock = new(abci.ResponseFinalizeBlock)
abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
someTx := []byte("tx")
resp, err := mock.FinalizeBlock(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
require.NoError(t, err)
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
for _, tx := range resp.Txs {
if tx.Code == abci.CodeTypeOK {
validTxs++
} else {
invalidTxs++
}
txCount++
}
})
require.Equal(t, 1, txCount)
require.Equal(t, 1, validTxs)
require.Zero(t, invalidTxs)
}
func tempWALWithData(t *testing.T, data []byte) string {
t.Helper()
@ -804,16 +748,19 @@ func testHandshakeReplay(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) })
clientCreator2 := abciclient.NewLocalCreator(kvstoreApp)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
clientCreator2 := abciclient.NewLocalClient(logger, kvstoreApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, eventBus, nBlocks, mode, store)
}
// Prune block store if requested
@ -828,10 +775,11 @@ func testHandshakeReplay(
// now start the app using the handshake - it should sync
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
proxyApp := proxy.New(clientCreator2, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
require.True(t, proxyApp.IsRunning())
require.NotNil(t, proxyApp)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
err = handshaker.Handshake(ctx, proxyApp)
@ -842,7 +790,7 @@ func testHandshakeReplay(
require.NoError(t, err, "Error on abci handshake")
// get the latest app hash from the app
res, err := proxyApp.Query().Info(ctx, abci.RequestInfo{Version: ""})
res, err := proxyApp.Info(ctx, abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
@ -875,11 +823,12 @@ func applyBlock(
evpool sm.EvidencePool,
st sm.State,
blk *types.Block,
proxyApp proxy.AppConns,
appClient abciclient.Client,
blockStore *mockBlockStore,
eventBus *eventbus.EventBus,
) sm.State {
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mempool, evpool, blockStore, eventBus)
bps, err := blk.MakePartSet(testPartSize)
require.NoError(t, err)
@ -892,23 +841,24 @@ func applyBlock(
func buildAppStateFromChain(
ctx context.Context,
t *testing.T,
proxyApp proxy.AppConns,
appClient abciclient.Client,
stateStore sm.Store,
mempool mempool.Mempool,
evpool sm.EvidencePool,
state sm.State,
chain []*types.Block,
eventBus *eventbus.EventBus,
nBlocks int,
mode uint,
blockStore *mockBlockStore,
) {
t.Helper()
// start a new app without handshake, play nBlocks blocks
require.NoError(t, proxyApp.Start(ctx))
require.NoError(t, appClient.Start(ctx))
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
_, err := appClient.InitChain(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)
@ -919,18 +869,18 @@ func buildAppStateFromChain(
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
}
case 1, 2, 3:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
}
if mode == 2 || mode == 3 {
// update the kvstore height and apphash
// as if we ran commit but not
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], appClient, blockStore, eventBus)
}
default:
require.Fail(t, "unknown mode %v", mode)
@ -958,37 +908,40 @@ func buildTMStateFromChain(
kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger,
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
defer kvstoreApp.Close()
clientCreator := abciclient.NewLocalCreator(kvstoreApp)
client := abciclient.NewLocalClient(logger, kvstoreApp)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
_, err := proxyApp.InitChain(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
switch mode {
case 0:
// sync right up
for _, block := range chain {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
}
case 1, 2, 3:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore, eventBus)
default:
require.Fail(t, "unknown mode %v", mode)
}
@ -1025,20 +978,23 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
logger := log.TestingLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
// 2. Tendermint must panic if app returns wrong hash for the first block
// - RANDOM HASH
// - 0x02
// - 0x03
{
app := &badApp{numBlocks: 3, allHashesAreWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
assert.Panics(t, func() {
h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
if err = h.Handshake(ctx, proxyApp); err != nil {
t.Log(err)
}
@ -1051,14 +1007,14 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
// - RANDOM HASH
{
app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
client := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); proxyApp.Wait() })
assert.Panics(t, func() {
h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
if err = h.Handshake(ctx, proxyApp); err != nil {
t.Log(err)
}
@ -1282,12 +1238,16 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
votePower := 10 + int64(rand.Uint32())
val, _, err := factory.Validator(ctx, votePower)
require.NoError(t, err)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)
client := abciclient.NewLocalClient(logger, app)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
require.NoError(t, err)
@ -1306,9 +1266,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
logger := log.TestingLogger()
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
proxyApp := proxy.New(client, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake")


+ 39
- 19
internal/consensus/state.go

@ -121,6 +121,9 @@ type State struct {
// store blocks and commits
blockStore sm.BlockStore
stateStore sm.Store
initialStatePopulated bool
// create and execute blocks
blockExec *sm.BlockExecutor
@ -189,18 +192,21 @@ func NewState(
ctx context.Context,
logger log.Logger,
cfg *config.ConsensusConfig,
state sm.State,
store sm.Store,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
txNotifier txNotifier,
evpool evidencePool,
eventBus *eventbus.EventBus,
options ...StateOption,
) *State {
) (*State, error) {
cs := &State{
eventBus: eventBus,
logger: logger,
config: cfg,
blockExec: blockExec,
blockStore: blockStore,
stateStore: store,
txNotifier: txNotifier,
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
@ -220,27 +226,40 @@ func NewState(
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
if err := cs.updateStateFromStore(ctx); err != nil {
return nil, err
}
cs.updateToState(ctx, state)
// NOTE: we do not call scheduleRound0 yet, we do that upon Start()
cs.BaseService = *service.NewBaseService(logger, "State", cs)
for _, option := range options {
option(cs)
}
return cs
return cs, nil
}
// SetEventBus sets event bus.
func (cs *State) SetEventBus(b *eventbus.EventBus) {
cs.eventBus = b
cs.blockExec.SetEventBus(b)
func (cs *State) updateStateFromStore(ctx context.Context) error {
if cs.initialStatePopulated {
return nil
}
state, err := cs.stateStore.Load()
if err != nil {
return fmt.Errorf("loading state: %w", err)
}
if state.IsEmpty() {
return nil
}
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}
cs.updateToState(ctx, state)
cs.initialStatePopulated = true
return nil
}
// StateMetrics sets the metrics.
@ -365,6 +384,10 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart(ctx context.Context) error {
if err := cs.updateStateFromStore(ctx); err != nil {
return err
}
// We may set the WAL in testing before calling Start, so only OpenWAL if its
// still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok {
@ -867,14 +890,11 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
}
}
rs := cs.RoundState
var mi msgInfo
select {
case <-cs.txNotifier.TxsAvailable():
cs.handleTxsAvailable(ctx)
case mi = <-cs.peerMsgQueue:
case mi := <-cs.peerMsgQueue:
if err := cs.wal.Write(mi); err != nil {
cs.logger.Error("failed writing to WAL", "err", err)
}
@ -883,7 +903,7 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
// may generate internal events (votes, complete proposals, 2/3 majorities)
cs.handleMsg(ctx, mi)
case mi = <-cs.internalMsgQueue:
case mi := <-cs.internalMsgQueue:
err := cs.wal.WriteSync(mi) // NOTE: fsync
if err != nil {
panic(fmt.Sprintf(
@ -902,7 +922,7 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
// if the timeout is relevant to the rs
// go to the next step
cs.handleTimeout(ctx, ti, rs)
cs.handleTimeout(ctx, ti, cs.RoundState)
case <-ctx.Done():
onExit(cs)


+ 73
- 0
internal/consensus/state_test.go

@ -1965,6 +1965,79 @@ func TestProcessProposalAccept(t *testing.T) {
}
}
func TestFinalizeBlockCalled(t *testing.T) {
for _, testCase := range []struct {
name string
voteNil bool
expectCalled bool
}{
{
name: "finalze block called when block committed",
voteNil: false,
expectCalled: true,
},
{
name: "not called when block not committed",
voteNil: true,
expectCalled: false,
},
} {
t.Run(testCase.name, func(t *testing.T) {
config := configSetup(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
m := abcimocks.NewBaseMock()
m.On("ProcessProposal", mock.Anything).Return(abcitypes.ResponseProcessProposal{Accept: true})
m.On("VerifyVoteExtension", mock.Anything).Return(abcitypes.ResponseVerifyVoteExtension{
Result: abcitypes.ResponseVerifyVoteExtension_ACCEPT,
})
m.On("FinalizeBlock", mock.Anything).Return(abcitypes.ResponseFinalizeBlock{}).Maybe()
cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m})
height, round := cs1.Height, cs1.Round
proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
pv1, err := cs1.privValidator.GetPubKey(ctx)
require.NoError(t, err)
addr := pv1.Address()
voteCh := subscribeToVoter(ctx, t, cs1, addr)
startTestRound(ctx, cs1, cs1.Height, round)
ensureNewRound(t, newRoundCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs := cs1.GetRoundState()
blockID := types.BlockID{}
nextRound := round + 1
nextHeight := height
if !testCase.voteNil {
nextRound = 0
nextHeight = height + 1
blockID = types.BlockID{
Hash: rs.ProposalBlock.Hash(),
PartSetHeader: rs.ProposalBlockParts.Header(),
}
}
signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...)
ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash())
signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...)
ensurePrecommit(t, voteCh, height, round)
ensureNewRound(t, newRoundCh, nextHeight, nextRound)
m.AssertExpectations(t)
if !testCase.expectCalled {
m.AssertNotCalled(t, "FinalizeBlock", mock.Anything)
} else {
m.AssertCalled(t, "FinalizeBlock", mock.Anything)
}
})
}
}
// 4 vals, 3 Nil Precommits at P0
// What we want:
// P0 waits for timeoutPrecommit before starting next round


+ 30
- 23
internal/consensus/wal_generator.go

@ -30,8 +30,10 @@ import (
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) (err error) {
// If the node fails to produce given numBlocks, it fails the test.
func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) {
t.Helper()
cfg := getConfig(t)
app := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), "wal_generator"))
@ -46,41 +48,46 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
privValidatorStateFile := cfg.PrivValidator.StateFile()
privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
if err != nil {
return err
t.Fatal(err)
}
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
return fmt.Errorf("failed to read genesis file: %w", err)
t.Fatal(fmt.Errorf("failed to read genesis file: %w", err))
}
blockStoreDB := dbm.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)
t.Fatal(fmt.Errorf("failed to make genesis state: %w", err))
}
state.Version.Consensus.App = kvstore.ProtocolVersion
if err = stateStore.Save(state); err != nil {
t.Error(err)
t.Fatal(err)
}
blockStore := store.NewBlockStore(blockStoreDB)
proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics())
proxyLogger := logger.With("module", "proxy")
proxyApp := proxy.New(abciclient.NewLocalClient(logger, app), proxyLogger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
return fmt.Errorf("failed to start proxy app connections: %w", err)
t.Fatal(fmt.Errorf("failed to start proxy app connections: %w", err))
}
t.Cleanup(proxyApp.Wait)
eventBus := eventbus.NewDefault(logger.With("module", "events"))
if err := eventBus.Start(ctx); err != nil {
return fmt.Errorf("failed to start event bus: %w", err)
t.Fatal(fmt.Errorf("failed to start event bus: %w", err))
}
t.Cleanup(func() { eventBus.Stop(); eventBus.Wait() })
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(ctx, logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetEventBus(eventBus)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp, mempool, evpool, blockStore, eventBus)
consensusState, err := NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
if err != nil {
t.Fatal(err)
}
if privValidator != nil && privValidator != (*privval.FilePV)(nil) {
consensusState.SetPrivValidator(ctx, privValidator)
}
@ -91,22 +98,24 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
// see wal.go#103
if err := wal.Write(EndHeightMessage{0}); err != nil {
t.Error(err)
t.Fatal(err)
}
consensusState.wal = wal
if err := consensusState.Start(ctx); err != nil {
return fmt.Errorf("failed to start consensus state: %w", err)
t.Fatal(fmt.Errorf("failed to start consensus state: %w", err))
}
t.Cleanup(consensusState.Wait)
defer consensusState.Stop()
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-numBlocksWritten:
consensusState.Stop()
return nil
case <-time.After(1 * time.Minute):
consensusState.Stop()
return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
case <-timer.C:
t.Fatal(fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks))
}
}
@ -115,9 +124,7 @@ func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlo
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(ctx, t, logger, wr, numBlocks); err != nil {
return []byte{}, err
}
WALGenerateNBlocks(ctx, t, logger, wr, numBlocks)
wr.Flush()
return b.Bytes(), nil
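Note: WALGenerateNBlocks is now a test helper that fails the test directly instead of returning an error, which is why WALWithNBlocks drops its error check above. The general shape of that pattern as an illustrative sketch; generateWAL below is hypothetical, not a function in this codebase:

// mustGenerateWAL fails the calling test on error instead of returning it.
// t.Helper() makes the failure point at the caller's line, not this one.
func mustGenerateWAL(t *testing.T, numBlocks int) []byte {
    t.Helper()
    data, err := generateWAL(numBlocks) // hypothetical producer, for illustration only
    if err != nil {
        t.Fatalf("failed to produce %d blocks: %v", numBlocks, err)
    }
    return data
}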


+ 9
- 10
internal/consensus/wal_test.go

@ -3,6 +3,7 @@ package consensus
import (
"bytes"
"context"
"os"
"path/filepath"
"testing"
@ -41,13 +42,12 @@ func TestWALTruncate(t *testing.T) {
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
t.Cleanup(wal.Wait)
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
// 60 blocks are roughly 70K, which is greater than the group's headBuf size (4096 * 10).
// When headBuf is full, its content is flushed to the file; at that point RotateFile is
// called, so content to be truncated exists in each file.
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60)
require.NoError(t, err)
WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60)
// put the leakcheck here so it runs after other cleanup
// functions.
@ -112,7 +112,7 @@ func TestWALWrite(t *testing.T) {
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
t.Cleanup(wal.Wait)
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
// 1) Write returns an error if msg is too big
msg := &BlockPartMessage{
@ -151,7 +151,6 @@ func TestWALSearchForEndHeight(t *testing.T) {
wal, err := NewWAL(ctx, logger, walFile)
require.NoError(t, err)
t.Cleanup(func() { wal.Stop(); wal.Wait() })
h := int64(3)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
@ -176,24 +175,24 @@ func TestWALPeriodicSync(t *testing.T) {
walDir := t.TempDir()
walFile := filepath.Join(walDir, "wal")
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond))
defer os.RemoveAll(walFile)
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond))
require.NoError(t, err)
wal.SetFlushInterval(walTestFlushInterval)
logger := log.NewNopLogger()
// Generate some data
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5)
require.NoError(t, err)
WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5)
// We should have data in the buffer now
assert.NotZero(t, wal.Group().Buffered())
require.NoError(t, wal.Start(ctx))
t.Cleanup(func() { wal.Stop(); wal.Wait() })
t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() })
time.Sleep(walTestFlushInterval + (10 * time.Millisecond))
time.Sleep(walTestFlushInterval + (20 * time.Millisecond))
// The data should have been flushed by the periodic sync
assert.Zero(t, wal.Group().Buffered())


+ 0
- 32
internal/eventbus/event_bus.go

@ -50,13 +50,6 @@ func (b *EventBus) NumClientSubscriptions(clientID string) int {
return b.pubsub.NumClientSubscriptions(clientID)
}
// Deprecated: Use SubscribeWithArgs instead.
func (b *EventBus) Subscribe(ctx context.Context,
clientID string, query *tmquery.Query, capacities ...int) (Subscription, error) {
return b.pubsub.Subscribe(ctx, clientID, query, capacities...)
}
func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) {
return b.pubsub.SubscribeWithArgs(ctx, args)
}
@ -201,28 +194,3 @@ func (b *EventBus) PublishEventValidatorSetUpdates(ctx context.Context, data typ
func (b *EventBus) PublishEventEvidenceValidated(ctx context.Context, evidence types.EventDataEvidenceValidated) error {
return b.Publish(ctx, types.EventEvidenceValidatedValue, evidence)
}
//-----------------------------------------------------------------------------
// NopEventBus implements a types.BlockEventPublisher that discards all events.
type NopEventBus struct{}
func (NopEventBus) PublishEventNewBlock(context.Context, types.EventDataNewBlock) error {
return nil
}
func (NopEventBus) PublishEventNewBlockHeader(context.Context, types.EventDataNewBlockHeader) error {
return nil
}
func (NopEventBus) PublishEventNewEvidence(context.Context, types.EventDataNewEvidence) error {
return nil
}
func (NopEventBus) PublishEventTx(context.Context, types.EventDataTx) error {
return nil
}
func (NopEventBus) PublishEventValidatorSetUpdates(context.Context, types.EventDataValidatorSetUpdates) error {
return nil
}
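Note: with the deprecated Subscribe method gone, callers use SubscribeWithArgs and pull events with Next, and with NopEventBus removed, code that used to pass a no-op bus now starts a real one via eventbus.NewDefault. A small usage sketch matching the call sites elsewhere in this change; clientID and handle are placeholders:

eventBus := eventbus.NewDefault(logger)
if err := eventBus.Start(ctx); err != nil {
    return err
}

sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
    ClientID: clientID,
    Query:    types.EventQueryNewBlock,
})
if err != nil {
    return err
}

msg, err := sub.Next(ctx) // blocks until a matching event arrives or ctx ends
if err != nil {
    return err
}
handle(msg) // placeholder for the caller's own logic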

+ 2
- 2
internal/eventbus/event_bus_test.go

@ -27,7 +27,7 @@ func TestEventBusPublishEventTx(t *testing.T) {
require.NoError(t, err)
tx := types.Tx("foo")
result := abci.ResponseDeliverTx{
result := abci.ExecTxResult{
Data: []byte("bar"),
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
@ -134,7 +134,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
require.NoError(t, err)
tx := types.Tx("foo")
result := abci.ResponseDeliverTx{
result := abci.ExecTxResult{
Data: []byte("bar"),
Events: []abci.Event{
{


+ 33
- 35
internal/evidence/pool.go

@ -36,14 +36,14 @@ type Pool struct {
evidenceList *clist.CList // concurrent linked-list of evidence
evidenceSize uint32 // amount of pending evidence
// needed to load validators to verify evidence
stateDB sm.Store
// needed to load headers and commits to verify evidence
blockStore BlockStore
stateDB sm.Store
mtx sync.Mutex
// latest state
state sm.State
state sm.State
isStarted bool
// evidence from consensus is buffered to this slice, awaiting until the next height
// before being flushed to the pool. This prevents broadcasting and proposing of
// evidence before the height with which the evidence happened is finished.
@ -60,46 +60,19 @@ type Pool struct {
Metrics *Metrics
}
func (evpool *Pool) SetEventBus(e *eventbus.EventBus) {
evpool.eventBus = e
}
// NewPool creates an evidence pool. If using an existing evidence store,
// it will add all pending evidence to the concurrent list.
func NewPool(logger log.Logger, evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore, metrics *Metrics) (*Pool, error) {
state, err := stateDB.Load()
if err != nil {
return nil, fmt.Errorf("failed to load state: %w", err)
}
pool := &Pool{
stateDB: stateDB,
func NewPool(logger log.Logger, evidenceDB dbm.DB, stateStore sm.Store, blockStore BlockStore, metrics *Metrics, eventBus *eventbus.EventBus) *Pool {
return &Pool{
blockStore: blockStore,
state: state,
stateDB: stateStore,
logger: logger,
evidenceStore: evidenceDB,
evidenceList: clist.New(),
consensusBuffer: make([]duplicateVoteSet, 0),
Metrics: metrics,
eventBus: eventBus,
}
// If pending evidence already in db, in event of prior failure, then check
// for expiration, update the size and load it back to the evidenceList.
pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence()
evList, _, err := pool.listEvidence(prefixPending, -1)
if err != nil {
return nil, err
}
atomic.StoreUint32(&pool.evidenceSize, uint32(len(evList)))
pool.Metrics.NumEvidence.Set(float64(pool.evidenceSize))
for _, ev := range evList {
pool.evidenceList.PushBack(ev)
}
pool.eventBus = nil
return pool, nil
}
// PendingEvidence is used primarily as part of block proposal and returns up to
@ -277,6 +250,31 @@ func (evpool *Pool) State() sm.State {
return evpool.state
}
func (evpool *Pool) Start(state sm.State) error {
if evpool.isStarted {
return errors.New("pool is already running")
}
evpool.state = state
// If pending evidence already in db, in event of prior failure, then check
// for expiration, update the size and load it back to the evidenceList.
evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence()
evList, _, err := evpool.listEvidence(prefixPending, -1)
if err != nil {
return err
}
atomic.StoreUint32(&evpool.evidenceSize, uint32(len(evList)))
evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize))
for _, ev := range evList {
evpool.evidenceList.PushBack(ev)
}
return nil
}
func (evpool *Pool) Close() error {
return evpool.evidenceStore.Close()
}
@ -449,6 +447,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide
}
func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
batch := evpool.evidenceStore.NewBatch()
defer batch.Close()
@ -473,7 +472,6 @@ func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
// remove evidence from the clist
evpool.removeEvidenceFromList(blockEvidenceMap)
// update the evidence size
atomic.AddUint32(&evpool.evidenceSize, ^uint32(len(blockEvidenceMap)-1))
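A minimal sketch of the new evidence-pool wiring shown above: NewPool now takes the event bus up front and returns the pool without an error, while loading pending evidence moved into Start, which takes the current state. This assumes the code lives inside the tendermint module (the evidence package is internal); the function name, parameters, and the tm-db import path are assumptions, not part of this change.

```go
package example

import (
	"context"
	"fmt"

	dbm "github.com/tendermint/tm-db" // assumed import path for dbm.DB

	"github.com/tendermint/tendermint/internal/eventbus"
	"github.com/tendermint/tendermint/internal/evidence"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/libs/log"
)

// startEvidencePool constructs the pool with an already-started event bus,
// then calls Start with the loaded state, mirroring the startPool test helper.
func startEvidencePool(
	ctx context.Context,
	logger log.Logger,
	evidenceDB dbm.DB,
	stateStore sm.Store,
	blockStore evidence.BlockStore,
) (*evidence.Pool, error) {
	eventBus := eventbus.NewDefault(logger)
	if err := eventBus.Start(ctx); err != nil {
		return nil, err
	}

	// NewPool no longer loads state or returns an error.
	pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)

	// Start prunes expired evidence and loads the remaining pending evidence.
	state, err := stateStore.Load()
	if err != nil {
		return nil, fmt.Errorf("failed to load state: %w", err)
	}
	if err := pool.Start(state); err != nil {
		return nil, err
	}
	return pool, nil
}
```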


+53 -48  internal/evidence/pool_test.go

@ -34,6 +34,18 @@ var (
defaultEvidenceMaxBytes int64 = 1000
)
func startPool(t *testing.T, pool *evidence.Pool, store sm.Store) {
t.Helper()
state, err := store.Load()
if err != nil {
t.Fatalf("cannot load state: %v", err)
}
if err := pool.Start(state); err != nil {
t.Fatalf("cannot start state pool: %v", err)
}
}
func TestEvidencePoolBasic(t *testing.T) {
var (
height = int64(1)
@ -51,9 +63,13 @@ func TestEvidencePoolBasic(t *testing.T) {
stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil)
stateStore.On("Load").Return(createState(height+1, valSet), nil)
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
require.NoError(t, setupEventBus(ctx, pool))
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
// evidence not seen yet:
evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes)
require.Equal(t, 0, len(evs))
@ -115,10 +131,12 @@ func TestAddExpiredEvidence(t *testing.T) {
return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}}
})
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
testCases := []struct {
evHeight int64
@ -159,9 +177,7 @@ func TestReportConflictingVotes(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, pv := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, pv, _ := defaultTestPool(ctx, t, height)
val := types.NewValidator(pv.PrivKey.PubKey(), 10)
@ -201,9 +217,7 @@ func TestEvidencePoolUpdate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
state := pool.State()
@ -273,9 +287,7 @@ func TestVerifyPendingEvidencePasses(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -295,9 +307,7 @@ func TestVerifyDuplicatedEvidenceFails(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
require.NoError(t, setupEventBus(ctx, pool))
pool, val, _ := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -321,7 +331,7 @@ func TestEventOnEvidenceValidated(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pool, val := defaultTestPool(ctx, t, height)
pool, val, eventBus := defaultTestPool(ctx, t, height)
ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -332,11 +342,6 @@ func TestEventOnEvidenceValidated(t *testing.T) {
)
require.NoError(t, err)
eventBus := eventbus.NewDefault(log.TestingLogger())
require.NoError(t, eventBus.Start(ctx))
pool.SetEventBus(eventBus)
const query = `tm.event='EvidenceValidated'`
evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
ClientID: "test",
@ -348,6 +353,9 @@ func TestEventOnEvidenceValidated(t *testing.T) {
go func() {
defer close(done)
msg, err := evSub.Next(ctx)
if ctx.Err() != nil {
return
}
assert.NoError(t, err)
edt := msg.Data().(types.EventDataEvidenceValidated)
@ -394,14 +402,15 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
hash := ev.Hash()
err = pool.AddEvidence(ctx, ev)
err := pool.AddEvidence(ctx, ev)
require.NoError(t, err)
err = pool.AddEvidence(ctx, ev)
require.NoError(t, err)
@ -449,11 +458,13 @@ func TestRecoverPendingEvidence(t *testing.T) {
blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
require.NoError(t, err)
// create previous pool and populate it
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
// create previous pool and populate it
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
goodEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator(
ctx,
@ -495,9 +506,8 @@ func TestRecoverPendingEvidence(t *testing.T) {
},
}, nil)
newPool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, newStateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
newPool := evidence.NewPool(logger, evidenceDB, newStateStore, blockStore, evidence.NopMetrics(), nil)
startPool(t, newPool, newStateStore)
evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes)
require.Equal(t, 1, len(evList))
@ -590,7 +600,7 @@ func makeCommit(height int64, valAddr []byte) *types.Commit {
return types.NewCommit(height, 0, types.BlockID{}, commitSigs)
}
func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV) {
func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV, *eventbus.EventBus) {
t.Helper()
val := types.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
@ -601,10 +611,14 @@ func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence
blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
require.NoError(t, err)
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err, "test evidence pool could not be created")
logger := log.NewNopLogger()
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
return pool, val
pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
return pool, val, eventBus
}
func createState(height int64, valSet *types.ValidatorSet) sm.State {
@ -616,12 +630,3 @@ func createState(height int64, valSet *types.ValidatorSet) sm.State {
ConsensusParams: *types.DefaultConsensusParams(),
}
}
func setupEventBus(ctx context.Context, evpool *evidence.Pool) error {
eventBus := eventbus.NewDefault(log.TestingLogger())
if err := eventBus.Start(ctx); err != nil {
return err
}
evpool.SetEventBus(eventBus)
return nil
}

+5 -4  internal/evidence/reactor_test.go

@ -82,13 +82,14 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint
}
return nil
})
rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
err = eventBus.Start(ctx)
require.NoError(t, err)
rts.pools[nodeID].SetEventBus(eventBus)
rts.pools[nodeID] = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics(), eventBus)
startPool(t, rts.pools[nodeID], stateStores[idx])
require.NoError(t, err)
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)


+34 -26  internal/evidence/verify_test.go

@ -12,6 +12,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/evidence"
"github.com/tendermint/tendermint/internal/evidence/mocks"
sm "github.com/tendermint/tendermint/internal/state"
@ -76,6 +77,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
attackTime := defaultEvidenceTime.Add(1 * time.Hour)
// create valid lunatic evidence
@ -96,8 +98,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
evList := types.EvidenceList{ev}
// check that the evidence pool correctly verifies the evidence
@ -111,32 +112,29 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
// if we submit evidence against only a single byzantine validator when we know
// there are more, this should return an error
ev.ByzantineValidators = ev.ByzantineValidators[:1]
t.Log(evList)
assert.Error(t, pool.CheckEvidence(ctx, evList))
// restore original byz vals
ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader)
// duplicate evidence should be rejected
evList = types.EvidenceList{ev, ev}
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
assert.Error(t, pool.CheckEvidence(ctx, evList))
// If evidence is submitted with an altered timestamp it should return an error
ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute)
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
err = pool.AddEvidence(ctx, ev)
err := pool.AddEvidence(ctx, ev)
assert.Error(t, err)
ev.Timestamp = defaultEvidenceTime
// Evidence submitted with a different validator power should fail
ev.TotalVotingPower = 1
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), nil)
err = pool.AddEvidence(ctx, ev)
assert.Error(t, err)
ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()
@ -154,6 +152,9 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
// create a forward lunatic attack
ev, trusted, common := makeLunaticEvidence(ctx,
t, attackHeight, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime)
@ -179,10 +180,11 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit)
blockStore.On("Height").Return(nodeHeight)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
require.NoError(t, setupEventBus(ctx, pool))
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
// check that the evidence pool correctly verifies the evidence
assert.NoError(t, pool.CheckEvidence(ctx, types.EvidenceList{ev}))
@ -199,8 +201,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
oldBlockStore.On("Height").Return(nodeHeight)
require.Equal(t, defaultEvidenceTime, oldBlockStore.LoadBlockMeta(nodeHeight).Header.Time)
pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics())
require.NoError(t, err)
pool = evidence.NewPool(logger, dbm.NewMemDB(), stateStore, oldBlockStore, evidence.NopMetrics(), nil)
assert.Error(t, pool.CheckEvidence(ctx, types.EvidenceList{ev}))
}
@ -208,6 +209,8 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10)
conflictingHeader := factory.MakeHeader(t, &types.Header{
@ -289,10 +292,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
evList := types.EvidenceList{ev}
err = pool.CheckEvidence(ctx, evList)
@ -305,6 +308,9 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
var height int64 = 10
conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10)
@ -378,10 +384,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
evList := types.EvidenceList{ev}
err = pool.CheckEvidence(ctx, evList)
@ -401,6 +407,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
val := types.NewMockPV()
val2 := types.NewMockPV()
valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(ctx, 1)})
@ -478,10 +485,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}})
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics())
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
require.NoError(t, setupEventBus(ctx, pool))
pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus)
startPool(t, pool, stateStore)
evList := types.EvidenceList{goodEv}
err = pool.CheckEvidence(ctx, evList)


+1 -1  internal/inspect/inspect_test.go

@ -265,7 +265,7 @@ func TestBlockResults(t *testing.T) {
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
Txs: []*abcitypes.ResponseDeliverTx{
TxResults: []*abcitypes.ExecTxResult{
{
GasUsed: testGasUsed,
},


+13 -0  internal/libs/autofile/group.go

@ -274,6 +274,10 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) {
g.mtx.Lock()
defer g.mtx.Unlock()
if err := ctx.Err(); err != nil {
return
}
if g.totalSizeLimit == 0 {
return
}
@ -290,6 +294,11 @@ func (g *Group) checkTotalSizeLimit(ctx context.Context) {
g.logger.Error("Group's head may grow without bound", "head", g.Head.Path)
return
}
if ctx.Err() != nil {
return
}
pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex)
fInfo, err := os.Stat(pathToRemove)
if err != nil {
@ -314,6 +323,10 @@ func (g *Group) rotateFile(ctx context.Context) {
g.mtx.Lock()
defer g.mtx.Unlock()
if err := ctx.Err(); err != nil {
return
}
headPath := g.Head.Path
if err := g.headBuf.Flush(); err != nil {


+1 -1  internal/libs/queue/queue_test.go

@ -167,7 +167,7 @@ func TestWait(t *testing.T) {
defer close(done)
got, err := q.Wait(ctx)
if err != nil {
t.Errorf("Wait: unexpected error: %w", err)
t.Errorf("Wait: unexpected error: %v", err)
} else if got != input {
t.Errorf("Wait: got %q, want %q", got, input)
}
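The %w to %v switches here and in the other test files in this change (secret_connection_test.go, syntax_test.go) follow from how the verbs differ: %w is only meaningful to fmt.Errorf, which uses it to wrap an error, while t.Errorf and b.Errorf format with fmt.Sprintf semantics, where %w has nothing to wrap and is reported as a bad verb. A small stand-alone illustration (not part of the change):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// fmt.Errorf understands %w and produces a wrapped error.
	wrapped := fmt.Errorf("wait failed: %w", err)
	fmt.Println(errors.Is(wrapped, err)) // true

	// Sprintf-style formatting (what t.Errorf uses under the hood) has no
	// error to wrap, so %w is flagged as a bad verb in the output, while %v
	// prints the error message as intended.
	fmt.Println(fmt.Sprintf("wait failed: %w", err)) // wait failed: %!w(...)
	fmt.Println(fmt.Sprintf("wait failed: %v", err)) // wait failed: boom
}
```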


+6 -8  internal/mempool/mempool.go

@ -9,10 +9,10 @@ import (
"sync/atomic"
"time"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
"github.com/tendermint/tendermint/internal/proxy"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/types"
@ -31,7 +31,7 @@ type TxMempool struct {
logger log.Logger
metrics *Metrics
config *config.MempoolConfig
proxyAppConn proxy.AppConnMempool
proxyAppConn abciclient.Client
// txsAvailable fires once for each height when the mempool is not empty
txsAvailable chan struct{}
@ -93,8 +93,7 @@ type TxMempool struct {
func NewTxMempool(
logger log.Logger,
cfg *config.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
proxyAppConn abciclient.Client,
options ...TxMempoolOption,
) *TxMempool {
@ -102,7 +101,7 @@ func NewTxMempool(
logger: logger,
config: cfg,
proxyAppConn: proxyAppConn,
height: height,
height: -1,
cache: NopTxCache{},
metrics: NopMetrics(),
txStore: NewTxStore(),
@ -418,11 +417,10 @@ func (txmp *TxMempool) Update(
ctx context.Context,
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
execTxResult []*abci.ExecTxResult,
newPreFn PreCheckFunc,
newPostFn PostCheckFunc,
) error {
txmp.height = blockHeight
txmp.notifiedTxsAvailable = false
@ -434,7 +432,7 @@ func (txmp *TxMempool) Update(
}
for i, tx := range blockTxs {
if deliverTxResponses[i].Code == abci.CodeTypeOK {
if execTxResult[i].Code == abci.CodeTypeOK {
// add the valid committed transaction to the cache (if missing)
_ = txmp.cache.Push(tx)
} else if !txmp.config.KeepInvalidTxsInCache {
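A minimal sketch of the new mempool call pattern shown above, assuming it lives inside the tendermint module: the mempool now takes an abciclient.Client directly instead of a proxy.AppConnMempool, the height argument is gone from the constructor (it starts at -1 until the first Update), and Update takes the renamed []*abci.ExecTxResult. The helper names newMempool and commitBlock are illustrative only.

```go
package example

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// newMempool builds the mempool against a local ABCI client, as the updated
// setup helper in mempool_test.go does.
func newMempool(ctx context.Context, logger log.Logger, cfg *config.MempoolConfig) (*mempool.TxMempool, error) {
	conn := abciclient.NewLocalClient(logger, kvstore.NewApplication())
	if err := conn.Start(ctx); err != nil {
		return nil, err
	}
	return mempool.NewTxMempool(logger, cfg, conn), nil
}

// commitBlock marks a block's transactions as committed using the renamed
// ExecTxResult type; Update must be called with the mempool lock held.
func commitBlock(ctx context.Context, txmp *mempool.TxMempool, height int64, txs types.Txs) error {
	results := make([]*abci.ExecTxResult, len(txs))
	for i := range results {
		results[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	defer txmp.Unlock()
	return txmp.Update(ctx, height, txs, results, nil, nil)
}
```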


+19 -19  internal/mempool/mempool_test.go

@ -78,24 +78,24 @@ func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoo
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
app := &application{kvstore.NewApplication()}
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
conn := abciclient.NewLocalClient(logger, &application{
kvstore.NewApplication(),
})
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
cfg.Mempool.CacheSize = cacheSize
appConnMem, err := cc(logger)
require.NoError(t, err)
require.NoError(t, appConnMem.Start(ctx))
require.NoError(t, conn.Start(ctx))
t.Cleanup(func() {
os.RemoveAll(cfg.RootDir)
cancel()
appConnMem.Wait()
conn.Wait()
})
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, conn, options...)
}
func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@ -172,9 +172,9 @@ func TestTxMempool_TxsAvailable(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
// commit half the transactions and ensure we fire an event
@ -204,9 +204,9 @@ func TestTxMempool_Size(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -231,9 +231,9 @@ func TestTxMempool_Flush(t *testing.T) {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
responses := make([]*abci.ExecTxResult, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -446,7 +446,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
for range ticker.C {
reapedTxs := txmp.ReapMaxTxs(200)
if len(reapedTxs) > 0 {
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses := make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
var code uint32
@ -456,7 +456,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
code = abci.CodeTypeOK
}
responses[i] = &abci.ResponseDeliverTx{Code: code}
responses[i] = &abci.ExecTxResult{Code: code}
}
txmp.Lock()
@ -494,9 +494,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses := make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()
@ -520,9 +520,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
// removed. However, we do know that at most 95 txs can be expired and
// removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
responses = make([]*abci.ExecTxResult, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
}
txmp.Lock()


+1 -1  internal/mempool/mock/mempool.go

@ -27,7 +27,7 @@ func (Mempool) Update(
_ context.Context,
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ []*abci.ExecTxResult,
_ mempool.PreCheckFunc,
_ mempool.PostCheckFunc,
) error {


+3 -3  internal/mempool/reactor_test.go

@ -242,9 +242,9 @@ func TestReactorConcurrency(t *testing.T) {
mempool.Lock()
defer mempool.Unlock()
deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
deliverTxResponses := make([]*abci.ExecTxResult, len(txs))
for i := range txs {
deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
deliverTxResponses[i] = &abci.ExecTxResult{Code: 0}
}
require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil))
@ -261,7 +261,7 @@ func TestReactorConcurrency(t *testing.T) {
mempool.Lock()
defer mempool.Unlock()
err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil)
require.NoError(t, err)
}()
}


+1 -1  internal/mempool/types.go

@ -66,7 +66,7 @@ type Mempool interface {
ctx context.Context,
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
txResults []*abci.ExecTxResult,
newPreFn PreCheckFunc,
newPostFn PostCheckFunc,
) error


+8 -8  internal/p2p/conn/secret_connection_test.go

@ -126,7 +126,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
nodePrvKey := ed25519.GenPrivKey()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("failed to establish SecretConnection for node: %w", err)
t.Errorf("failed to establish SecretConnection for node: %v", err)
return nil, true, err
}
// In parallel, handle some reads and writes.
@ -136,7 +136,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
t.Errorf("failed to write to nodeSecretConn: %w", err)
t.Errorf("failed to write to nodeSecretConn: %v", err)
return nil, true, err
}
if n != len(nodeWrite) {
@ -163,7 +163,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
}
return nil, false, nil
} else if err != nil {
t.Errorf("failed to read from nodeSecretConn: %w", err)
t.Errorf("failed to read from nodeSecretConn: %v", err)
return nil, true, err
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
@ -288,7 +288,7 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i
for i := 0; i < n; i++ {
_, err := conn.Write([]byte(txt))
if err != nil {
t.Errorf("failed to write to fooSecConn: %w", err)
t.Errorf("failed to write to fooSecConn: %v", err)
return
}
}
@ -343,7 +343,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil {
tb.Errorf("failed to establish SecretConnection for foo: %w", err)
tb.Errorf("failed to establish SecretConnection for foo: %v", err)
return nil, true, err
}
remotePubBytes := fooSecConn.RemotePubKey()
@ -358,7 +358,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
func(_ int) (val interface{}, abort bool, err error) {
barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil {
tb.Errorf("failed to establish SecretConnection for bar: %w", err)
tb.Errorf("failed to establish SecretConnection for bar: %v", err)
return nil, true, err
}
remotePubBytes := barSecConn.RemotePubKey()
@ -405,7 +405,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
if err == io.EOF {
return
} else if err != nil {
b.Errorf("failed to read from barSecConn: %w", err)
b.Errorf("failed to read from barSecConn: %v", err)
return
}
}
@ -416,7 +416,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
idx := mrand.Intn(len(fooWriteBytes))
_, err := fooSecConn.Write(fooWriteBytes[idx])
if err != nil {
b.Errorf("failed to write to fooSecConn: %w", err)
b.Errorf("failed to write to fooSecConn: %v", err)
return
}
}


+0 -249  internal/proxy/app_conn.go

@ -1,249 +0,0 @@
package proxy
import (
"context"
"time"
"github.com/go-kit/kit/metrics"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
)
//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot
//----------------------------------------------------------------------------------------
// Enforce which abci msgs can be sent on a connection at the type level
type AppConnConsensus interface {
Error() error
InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
Commit(context.Context) (*types.ResponseCommit, error)
}
type AppConnMempool interface {
Error() error
CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
Flush(context.Context) error
}
type AppConnQuery interface {
Error() error
Echo(context.Context, string) (*types.ResponseEcho, error)
Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
}
type AppConnSnapshot interface {
Error() error
ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
}
//-----------------------------------------------------------------------------------------
// Implements AppConnConsensus (subset of abciclient.Client)
type appConnConsensus struct {
metrics *Metrics
appConn abciclient.Client
}
var _ AppConnConsensus = (*appConnConsensus)(nil)
func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus {
return &appConnConsensus{
metrics: metrics,
appConn: appConn,
}
}
func (app *appConnConsensus) Error() error {
return app.appConn.Error()
}
func (app *appConnConsensus) InitChain(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))()
return app.appConn.InitChain(ctx, req)
}
func (app *appConnConsensus) PrepareProposal(
ctx context.Context,
req types.RequestPrepareProposal,
) (*types.ResponsePrepareProposal, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))()
return app.appConn.PrepareProposal(ctx, req)
}
func (app *appConnConsensus) ProcessProposal(
ctx context.Context,
req types.RequestProcessProposal,
) (*types.ResponseProcessProposal, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))()
return app.appConn.ProcessProposal(ctx, req)
}
func (app *appConnConsensus) ExtendVote(
ctx context.Context,
req types.RequestExtendVote,
) (*types.ResponseExtendVote, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))()
return app.appConn.ExtendVote(ctx, req)
}
func (app *appConnConsensus) VerifyVoteExtension(
ctx context.Context,
req types.RequestVerifyVoteExtension,
) (*types.ResponseVerifyVoteExtension, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))()
return app.appConn.VerifyVoteExtension(ctx, req)
}
func (app *appConnConsensus) FinalizeBlock(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*types.ResponseFinalizeBlock, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))()
return app.appConn.FinalizeBlock(ctx, req)
}
func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))()
return app.appConn.Commit(ctx)
}
//------------------------------------------------
// Implements AppConnMempool (subset of abciclient.Client)
type appConnMempool struct {
metrics *Metrics
appConn abciclient.Client
}
func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool {
return &appConnMempool{
metrics: metrics,
appConn: appConn,
}
}
func (app *appConnMempool) Error() error {
return app.appConn.Error()
}
func (app *appConnMempool) Flush(ctx context.Context) error {
defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))()
return app.appConn.Flush(ctx)
}
func (app *appConnMempool) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))()
return app.appConn.CheckTx(ctx, req)
}
//------------------------------------------------
// Implements AppConnQuery (subset of abciclient.Client)
type appConnQuery struct {
metrics *Metrics
appConn abciclient.Client
}
func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery {
return &appConnQuery{
metrics: metrics,
appConn: appConn,
}
}
func (app *appConnQuery) Error() error {
return app.appConn.Error()
}
func (app *appConnQuery) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))()
return app.appConn.Echo(ctx, msg)
}
func (app *appConnQuery) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))()
return app.appConn.Info(ctx, req)
}
func (app *appConnQuery) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))()
return app.appConn.Query(ctx, reqQuery)
}
//------------------------------------------------
// Implements AppConnSnapshot (subset of abciclient.Client)
type appConnSnapshot struct {
metrics *Metrics
appConn abciclient.Client
}
func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot {
return &appConnSnapshot{
metrics: metrics,
appConn: appConn,
}
}
func (app *appConnSnapshot) Error() error {
return app.appConn.Error()
}
func (app *appConnSnapshot) ListSnapshots(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))()
return app.appConn.ListSnapshots(ctx, req)
}
func (app *appConnSnapshot) OfferSnapshot(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))()
return app.appConn.OfferSnapshot(ctx, req)
}
func (app *appConnSnapshot) LoadSnapshotChunk(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))()
return app.appConn.LoadSnapshotChunk(ctx, req)
}
func (app *appConnSnapshot) ApplySnapshotChunk(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))()
return app.appConn.ApplySnapshotChunk(ctx, req)
}
// addTimeSample returns a function that, when called, adds an observation to m.
// The observation added to m is the number of seconds elapsed since addTimeSample
// was initially called. addTimeSample is meant to be called in a defer to calculate
// the amount of time a function takes to complete.
func addTimeSample(m metrics.Histogram) func() {
start := time.Now()
return func() { m.Observe(time.Since(start).Seconds()) }
}

+182 -11  internal/proxy/client.go

@ -1,42 +1,213 @@
package proxy
import (
"context"
"io"
"os"
"syscall"
"time"
"github.com/go-kit/kit/metrics"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
e2e "github.com/tendermint/tendermint/test/e2e/app"
)
// DefaultClientCreator returns a default ClientCreator, which will create a
// local client if addr is one of: 'kvstore',
// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client.
// ClientFactory returns a client object. It creates a local client if addr
// is one of: 'kvstore', 'persistent_kvstore', 'e2e', or 'noop'; otherwise it
// creates a remote client.
//
// The Closer is a noop except for persistent_kvstore applications,
// which will clean up the store.
func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) {
func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient.Client, io.Closer, error) {
switch addr {
case "kvstore":
return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{}
return abciclient.NewLocalClient(logger, kvstore.NewApplication()), noopCloser{}, nil
case "persistent_kvstore":
app := kvstore.NewPersistentKVStoreApplication(logger, dbDir)
return abciclient.NewLocalCreator(app), app
return abciclient.NewLocalClient(logger, app), app, nil
case "e2e":
app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
if err != nil {
panic(err)
return nil, noopCloser{}, err
}
return abciclient.NewLocalCreator(app), noopCloser{}
return abciclient.NewLocalClient(logger, app), noopCloser{}, nil
case "noop":
return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{}
return abciclient.NewLocalClient(logger, types.NewBaseApplication()), noopCloser{}, nil
default:
mustConnect := false // loop retrying
return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{}
const mustConnect = false // loop retrying
client, err := abciclient.NewClient(logger, addr, transport, mustConnect)
if err != nil {
return nil, noopCloser{}, err
}
return client, noopCloser{}, nil
}
}
type noopCloser struct{}
func (noopCloser) Close() error { return nil }
// proxyClient provides the application connection.
type proxyClient struct {
service.BaseService
logger log.Logger
client abciclient.Client
metrics *Metrics
}
// New creates a proxy application interface.
func New(client abciclient.Client, logger log.Logger, metrics *Metrics) abciclient.Client {
conn := &proxyClient{
logger: logger,
metrics: metrics,
client: client,
}
conn.BaseService = *service.NewBaseService(logger, "proxyClient", conn)
return conn
}
func (app *proxyClient) OnStop() { tryCallStop(app.client) }
func (app *proxyClient) Error() error { return app.client.Error() }
func tryCallStop(client abciclient.Client) {
if c, ok := client.(interface{ Stop() }); ok {
c.Stop()
}
}
func (app *proxyClient) OnStart(ctx context.Context) error {
var err error
defer func() {
if err != nil {
tryCallStop(app.client)
}
}()
// Kill Tendermint if the ABCI application crashes.
go func() {
if !app.client.IsRunning() {
return
}
app.client.Wait()
if ctx.Err() != nil {
return
}
if err := app.client.Error(); err != nil {
app.logger.Error("client connection terminated. Did the application crash? Please restart tendermint",
"err", err)
if killErr := kill(); killErr != nil {
app.logger.Error("Failed to kill this process - please do so manually",
"err", killErr)
}
}
}()
return app.client.Start(ctx)
}
func kill() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
return p.Signal(syscall.SIGABRT)
}
func (app *proxyClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))()
return app.client.InitChain(ctx, req)
}
func (app *proxyClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))()
return app.client.PrepareProposal(ctx, req)
}
func (app *proxyClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))()
return app.client.ProcessProposal(ctx, req)
}
func (app *proxyClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))()
return app.client.ExtendVote(ctx, req)
}
func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))()
return app.client.VerifyVoteExtension(ctx, req)
}
func (app *proxyClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))()
return app.client.FinalizeBlock(ctx, req)
}
func (app *proxyClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))()
return app.client.Commit(ctx)
}
func (app *proxyClient) Flush(ctx context.Context) error {
defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))()
return app.client.Flush(ctx)
}
func (app *proxyClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))()
return app.client.CheckTx(ctx, req)
}
func (app *proxyClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))()
return app.client.Echo(ctx, msg)
}
func (app *proxyClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))()
return app.client.Info(ctx, req)
}
func (app *proxyClient) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))()
return app.client.Query(ctx, reqQuery)
}
func (app *proxyClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))()
return app.client.ListSnapshots(ctx, req)
}
func (app *proxyClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))()
return app.client.OfferSnapshot(ctx, req)
}
func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))()
return app.client.LoadSnapshotChunk(ctx, req)
}
func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))()
return app.client.ApplySnapshotChunk(ctx, req)
}
// addTimeSample returns a function that, when called, adds an observation to m.
// The observation added to m is the number of seconds elapsed since addTimeSample
// was initially called. addTimeSample is meant to be called in a defer to calculate
// the amount of time a function takes to complete.
func addTimeSample(m metrics.Histogram) func() {
start := time.Now()
return func() { m.Observe(time.Since(start).Seconds()) }
}
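A minimal sketch of how the new single-client proxy is wired, assuming it lives inside the tendermint module: ClientFactory produces one abciclient.Client and New wraps it with the method-timing metrics and the crash watchdog that used to live in the removed multiAppConn. The helper name startApp is illustrative only.

```go
package example

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/libs/log"
)

// startApp creates the single ABCI client and wraps it in the proxy, which
// records per-method timings and kills the node if the application dies.
func startApp(ctx context.Context, logger log.Logger, addr, transport, dbDir string) (abciclient.Client, error) {
	client, closer, err := proxy.ClientFactory(logger, addr, transport, dbDir)
	if err != nil {
		return nil, err
	}
	_ = closer // a noop except for persistent_kvstore; close it on shutdown

	app := proxy.New(client, logger, proxy.NopMetrics())
	if err := app.Start(ctx); err != nil {
		return nil, err
	}
	return app, nil
}
```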

internal/proxy/app_conn_test.go → internal/proxy/client_test.go


+0 -131  internal/proxy/multi_app_conn.go

@ -1,131 +0,0 @@
package proxy
import (
"context"
"os"
"syscall"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
// AppConns is the Tendermint's interface to the application that consists of
// multiple connections.
type AppConns interface {
service.Service
// Mempool connection
Mempool() AppConnMempool
// Consensus connection
Consensus() AppConnConsensus
// Query connection
Query() AppConnQuery
// Snapshot connection
Snapshot() AppConnSnapshot
}
// NewAppConns calls NewMultiAppConn.
func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns {
return NewMultiAppConn(clientCreator, logger, metrics)
}
// multiAppConn implements AppConns.
//
// A multiAppConn is made of a few appConns and manages their underlying abci
// clients.
// TODO: on app restart, clients must reboot together
type multiAppConn struct {
service.BaseService
logger log.Logger
metrics *Metrics
consensusConn AppConnConsensus
mempoolConn AppConnMempool
queryConn AppConnQuery
snapshotConn AppConnSnapshot
client stoppableClient
clientCreator abciclient.Creator
}
// TODO: this is a totally internal and quasi-permanent shim for
// clients. Eventually we can have a single client and have some kind
// of reasonable lifecycle without needing an explicit stop method.
type stoppableClient interface {
abciclient.Client
Stop()
}
// NewMultiAppConn makes all necessary abci connections to the application.
func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns {
multiAppConn := &multiAppConn{
logger: logger,
metrics: metrics,
clientCreator: clientCreator,
}
multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn)
return multiAppConn
}
func (app *multiAppConn) Mempool() AppConnMempool { return app.mempoolConn }
func (app *multiAppConn) Consensus() AppConnConsensus { return app.consensusConn }
func (app *multiAppConn) Query() AppConnQuery { return app.queryConn }
func (app *multiAppConn) Snapshot() AppConnSnapshot { return app.snapshotConn }
func (app *multiAppConn) OnStart(ctx context.Context) error {
var err error
defer func() {
if err != nil {
app.client.Stop()
}
}()
var client abciclient.Client
client, err = app.clientCreator(app.logger)
if err != nil {
return err
}
app.queryConn = NewAppConnQuery(client, app.metrics)
app.snapshotConn = NewAppConnSnapshot(client, app.metrics)
app.mempoolConn = NewAppConnMempool(client, app.metrics)
app.consensusConn = NewAppConnConsensus(client, app.metrics)
app.client = client.(stoppableClient)
// Kill Tendermint if the ABCI application crashes.
go func() {
if !client.IsRunning() {
return
}
app.client.Wait()
if ctx.Err() != nil {
return
}
if err := app.client.Error(); err != nil {
app.logger.Error("client connection terminated. Did the application crash? Please restart tendermint",
"err", err)
if killErr := kill(); killErr != nil {
app.logger.Error("Failed to kill this process - please do so manually",
"err", killErr)
}
}
}()
return client.Start(ctx)
}
func (app *multiAppConn) OnStop() { app.client.Stop() }
func kill() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
return p.Signal(syscall.SIGTERM)
}

+0 -99  internal/proxy/multi_app_conn_test.go

@ -1,99 +0,0 @@
package proxy
import (
"context"
"errors"
"os"
"os/signal"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
abcimocks "github.com/tendermint/tendermint/abci/client/mocks"
"github.com/tendermint/tendermint/libs/log"
)
type noopStoppableClientImpl struct {
abciclient.Client
count int
}
func (c *noopStoppableClientImpl) Stop() { c.count++ }
func TestAppConns_Start_Stop(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clientMock := &abcimocks.Client{}
clientMock.On("Start", mock.Anything).Return(nil)
clientMock.On("Error").Return(nil)
clientMock.On("IsRunning").Return(true)
clientMock.On("Wait").Return(nil).Times(1)
cl := &noopStoppableClientImpl{Client: clientMock}
creatorCallCount := 0
creator := func(logger log.Logger) (abciclient.Client, error) {
creatorCallCount++
return cl, nil
}
appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics())
err := appConns.Start(ctx)
require.NoError(t, err)
time.Sleep(200 * time.Millisecond)
cancel()
appConns.Wait()
clientMock.AssertExpectations(t)
assert.Equal(t, 1, cl.count)
assert.Equal(t, 1, creatorCallCount)
}
// Upon failure, we call tmos.Kill
func TestAppConns_Failure(t *testing.T) {
ok := make(chan struct{})
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
go func() {
for range c {
close(ok)
return
}
}()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clientMock := &abcimocks.Client{}
clientMock.On("SetLogger", mock.Anything).Return()
clientMock.On("Start", mock.Anything).Return(nil)
clientMock.On("IsRunning").Return(true)
clientMock.On("Wait").Return(nil)
clientMock.On("Error").Return(errors.New("EOF"))
cl := &noopStoppableClientImpl{Client: clientMock}
creator := func(log.Logger) (abciclient.Client, error) {
return cl, nil
}
appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics())
err := appConns.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); appConns.Wait() })
select {
case <-ok:
t.Log("SIGTERM successfully received")
case <-time.After(5 * time.Second):
t.Fatal("expected process to receive SIGTERM signal")
}
}

+0 -20  internal/pubsub/pubsub.go

@ -153,26 +153,6 @@ func BufferCapacity(cap int) Option {
// BufferCapacity returns the capacity of the publication queue.
func (s *Server) BufferCapacity() int { return cap(s.queue) }
// Subscribe creates a subscription for the given client ID and query.
// If len(capacities) > 0, its first value is used as the queue capacity.
//
// Deprecated: Use SubscribeWithArgs. This method will be removed in v0.36.
func (s *Server) Subscribe(ctx context.Context, clientID string, query *query.Query, capacities ...int) (*Subscription, error) {
args := SubscribeArgs{
ClientID: clientID,
Query: query,
Limit: 1,
}
if len(capacities) > 0 {
args.Limit = capacities[0]
if len(capacities) > 1 {
args.Quota = capacities[1]
}
// bounds are checked below
}
return s.SubscribeWithArgs(ctx, args)
}
// Observe registers an observer function that will be called synchronously
// with each published message matching any of the given queries, prior to it
// being forwarded to any subscriber. If no queries are specified, all


+1 -1  internal/pubsub/query/syntax/syntax_test.go

@ -55,7 +55,7 @@ func TestScanner(t *testing.T) {
got = append(got, s.Token())
}
if err := s.Err(); err != io.EOF {
t.Errorf("Next: unexpected error: %w", err)
t.Errorf("Next: unexpected error: %v", err)
}
if !reflect.DeepEqual(got, test.want) {


+2 -2  internal/rpc/core/abci.go

@ -18,7 +18,7 @@ func (env *Environment) ABCIQuery(
height int64,
prove bool,
) (*coretypes.ResultABCIQuery, error) {
resQuery, err := env.ProxyAppQuery.Query(ctx, abci.RequestQuery{
resQuery, err := env.ProxyApp.Query(ctx, abci.RequestQuery{
Path: path,
Data: data,
Height: height,
@ -34,7 +34,7 @@ func (env *Environment) ABCIQuery(
// ABCIInfo gets some info about the application.
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info
func (env *Environment) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
resInfo, err := env.ProxyAppQuery.Info(ctx, proxy.RequestInfo)
resInfo, err := env.ProxyApp.Info(ctx, proxy.RequestInfo)
if err != nil {
return nil, err
}


+5 -7  internal/rpc/core/blocks.go

@ -193,8 +193,6 @@ func (env *Environment) Commit(ctx context.Context, heightPtr *int64) (*coretype
// If no height is provided, it will fetch results for the latest block.
//
// Results are for the height of the block containing the txs.
// Thus response.results.deliver_tx[5] is the results of executing
// getBlock(h).Txs[5]
// More: https://docs.tendermint.com/master/rpc/#/Info/block_results
func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
@ -208,13 +206,13 @@ func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*co
}
var totalGasUsed int64
for _, tx := range results.FinalizeBlock.GetTxs() {
totalGasUsed += tx.GetGasUsed()
for _, res := range results.FinalizeBlock.GetTxResults() {
totalGasUsed += res.GetGasUsed()
}
return &coretypes.ResultBlockResults{
Height: height,
TxsResults: results.FinalizeBlock.Txs,
TxsResults: results.FinalizeBlock.TxResults,
TotalGasUsed: totalGasUsed,
FinalizeBlockEvents: results.FinalizeBlock.Events,
ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates,
@ -222,8 +220,8 @@ func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*co
}, nil
}
// BlockSearch searches for a paginated set of blocks matching BeginBlock and
// EndBlock event search criteria.
// BlockSearch searches for a paginated set of blocks matching the provided
// query.
func (env *Environment) BlockSearch(
ctx context.Context,
query string,


+2 -2  internal/rpc/core/blocks_test.go

@ -72,7 +72,7 @@ func TestBlockchainInfo(t *testing.T) {
func TestBlockResults(t *testing.T) {
results := &tmstate.ABCIResponses{
FinalizeBlock: &abci.ResponseFinalizeBlock{
Txs: []*abci.ResponseDeliverTx{
TxResults: []*abci.ExecTxResult{
{Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10},
{Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5},
{Code: 1, Log: "not ok", GasUsed: 0},
@ -99,7 +99,7 @@ func TestBlockResults(t *testing.T) {
{101, true, nil},
{100, false, &coretypes.ResultBlockResults{
Height: 100,
TxsResults: results.FinalizeBlock.Txs,
TxsResults: results.FinalizeBlock.TxResults,
TotalGasUsed: 15,
FinalizeBlockEvents: results.FinalizeBlock.Events,
ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates,


+9 -11  internal/rpc/core/env.go

@ -11,6 +11,7 @@ import (
"github.com/rs/cors"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/blocksync"
@ -19,7 +20,6 @@ import (
"github.com/tendermint/tendermint/internal/eventlog"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/proxy"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
"github.com/tendermint/tendermint/internal/pubsub/query"
sm "github.com/tendermint/tendermint/internal/state"
@ -57,12 +57,6 @@ type consensusState interface {
GetRoundStateSimpleJSON() ([]byte, error)
}
type transport interface {
Listeners() []string
IsListening() bool
NodeInfo() types.NodeInfo
}
type peerManager interface {
Peers() []types.NodeID
Addresses(types.NodeID) []p2p.NodeAddress
@ -73,8 +67,7 @@ type peerManager interface {
// to be set up once during startup.
type Environment struct {
// external, thread safe interfaces
ProxyAppQuery proxy.AppConnQuery
ProxyAppMempool proxy.AppConnMempool
ProxyApp abciclient.Client
// interfaces defined in types and above
StateStore sm.Store
@ -84,8 +77,9 @@ type Environment struct {
ConsensusReactor *consensus.Reactor
BlockSyncReactor *blocksync.Reactor
// Legacy p2p stack
P2PTransport transport
IsListening bool
Listeners []string
NodeInfo types.NodeInfo
// interfaces for new p2p interfaces
PeerManager peerManager
@ -226,6 +220,10 @@ func (env *Environment) StartService(ctx context.Context, conf *config.Config) (
return nil, err
}
env.Listeners = []string{
fmt.Sprintf("Listener(@%v)", conf.P2P.ExternalAddress),
}
listenAddrs := strings.SplitAndTrimEmpty(conf.RPC.ListenAddress, ",", " ")
routes := NewRoutesMap(env, &RouteOptions{
Unsafe: conf.RPC.Unsafe,


+5 -5  internal/rpc/core/mempool.go

@ -114,10 +114,10 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*co
}
return &coretypes.ResultBroadcastTxCommit{
CheckTx: *r,
DeliverTx: txres.TxResult,
Hash: tx.Hash(),
Height: txres.Height,
CheckTx: *r,
TxResult: txres.TxResult,
Hash: tx.Hash(),
Height: txres.Height,
}, nil
}
}
@ -158,7 +158,7 @@ func (env *Environment) NumUnconfirmedTxs(ctx context.Context) (*coretypes.Resul
// be added to the mempool either.
// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx
func (env *Environment) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
res, err := env.ProxyAppMempool.CheckTx(ctx, abci.RequestCheckTx{Tx: tx})
res, err := env.ProxyApp.CheckTx(ctx, abci.RequestCheckTx{Tx: tx})
if err != nil {
return nil, err
}
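A hedged, caller-side sketch (not in the diff) of the field rename: code that previously read res.DeliverTx from broadcast_tx_commit now reads res.TxResult, which is an abci.ExecTxResult:

res, err := env.BroadcastTxCommit(ctx, tx)
if err != nil {
	return err
}
if res.CheckTx.Code != abci.CodeTypeOK {
	return fmt.Errorf("tx rejected by CheckTx: %s", res.CheckTx.Log)
}
if res.TxResult.Code != abci.CodeTypeOK { // formerly res.DeliverTx
	return fmt.Errorf("tx failed in block %d: %s", res.Height, res.TxResult.Log)
}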


+ 2
- 2
internal/rpc/core/net.go View File

@ -27,8 +27,8 @@ func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo,
}
return &coretypes.ResultNetInfo{
Listening: env.P2PTransport.IsListening(),
Listeners: env.P2PTransport.Listeners(),
Listening: env.IsListening,
Listeners: env.Listeners,
NPeers: len(peers),
Peers: peers,
}, nil


+ 1
- 1
internal/rpc/core/status.go View File

@ -66,7 +66,7 @@ func (env *Environment) Status(ctx context.Context) (*coretypes.ResultStatus, er
}
result := &coretypes.ResultStatus{
NodeInfo: env.P2PTransport.NodeInfo(),
NodeInfo: env.NodeInfo,
ApplicationInfo: applicationInfo,
SyncInfo: coretypes.SyncInfo{
LatestBlockHash: latestBlockHash,


+ 76
- 107
internal/state/execution.go View File

@ -2,17 +2,17 @@ package state
import (
"context"
"errors"
"fmt"
"time"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/proxy"
"github.com/tendermint/tendermint/libs/log"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmtypes "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@ -30,7 +30,7 @@ type BlockExecutor struct {
blockStore BlockStore
// execute the app against this
proxyApp proxy.AppConnConsensus
appClient abciclient.Client
// events
eventBus types.BlockEventPublisher
@ -60,16 +60,17 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
func NewBlockExecutor(
stateStore Store,
logger log.Logger,
proxyApp proxy.AppConnConsensus,
appClient abciclient.Client,
pool mempool.Mempool,
evpool EvidencePool,
blockStore BlockStore,
eventBus *eventbus.EventBus,
options ...BlockExecutorOption,
) *BlockExecutor {
res := &BlockExecutor{
eventBus: eventBus,
store: stateStore,
proxyApp: proxyApp,
eventBus: eventbus.NopEventBus{},
appClient: appClient,
mempool: pool,
evpool: evpool,
logger: logger,
@ -89,12 +90,6 @@ func (blockExec *BlockExecutor) Store() Store {
return blockExec.store
}
// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus.
func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) {
blockExec.eventBus = eventBus
}
// CreateProposalBlock calls state.MakeBlock with evidence from the evpool
// and txs from the mempool. The max bytes must be big enough to fit the commit.
// Up to 1/10th of the block space is allocated for maximum sized evidence.
@ -119,7 +114,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock(
txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas)
preparedProposal, err := blockExec.proxyApp.PrepareProposal(
preparedProposal, err := blockExec.appClient.PrepareProposal(
ctx,
abci.RequestPrepareProposal{
BlockData: txs.ToSliceOfBytes(),
@ -162,11 +157,11 @@ func (blockExec *BlockExecutor) ProcessProposal(
Hash: block.Header.Hash(),
Header: *block.Header.ToProto(),
Txs: block.Data.Txs.ToSliceOfBytes(),
LastCommitInfo: buildLastCommitInfo(block, blockExec.store, state.InitialHeight),
ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight),
ByzantineValidators: block.Evidence.ToABCI(),
}
resp, err := blockExec.proxyApp.ProcessProposal(ctx, req)
resp, err := blockExec.appClient.ProcessProposal(ctx, req)
if err != nil {
return false, ErrInvalidBlock(err)
}
@ -207,18 +202,22 @@ func (blockExec *BlockExecutor) ValidateBlock(ctx context.Context, state State,
func (blockExec *BlockExecutor) ApplyBlock(
ctx context.Context,
state State,
blockID types.BlockID,
block *types.Block,
) (State, error) {
blockID types.BlockID, block *types.Block) (State, error) {
// validate the block if we haven't already
if err := blockExec.ValidateBlock(ctx, state, block); err != nil {
return state, ErrInvalidBlock(err)
}
startTime := time.Now().UnixNano()
abciResponses, err := execBlockOnProxyApp(ctx,
blockExec.logger, blockExec.proxyApp, block, blockExec.store, state.InitialHeight,
pbh := block.Header.ToProto()
finalizeBlockResponse, err := blockExec.appClient.FinalizeBlock(
ctx,
abci.RequestFinalizeBlock{
Hash: block.Hash(),
Header: *pbh,
Txs: block.Txs.ToSliceOfBytes(),
DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight),
ByzantineValidators: block.Evidence.ToABCI(),
},
)
endTime := time.Now().UnixNano()
blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000)
@ -226,19 +225,22 @@ func (blockExec *BlockExecutor) ApplyBlock(
return state, ErrProxyAppConn(err)
}
abciResponses := &tmstate.ABCIResponses{
FinalizeBlock: finalizeBlockResponse,
}
// Save the results before we commit.
if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil {
return state, err
}
// validate the validator updates and convert to tendermint types
abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates
err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator)
err = validateValidatorUpdates(finalizeBlockResponse.ValidatorUpdates, state.ConsensusParams.Validator)
if err != nil {
return state, fmt.Errorf("error in validator updates: %w", err)
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates)
validatorUpdates, err := types.PB2TM.ValidatorUpdates(finalizeBlockResponse.ValidatorUpdates)
if err != nil {
return state, err
}
@ -247,13 +249,13 @@ func (blockExec *BlockExecutor) ApplyBlock(
}
// Update the state with the block and responses.
state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
state, err = state.Update(blockID, &block.Header, ABCIResponsesResultsHash(abciResponses), finalizeBlockResponse.ConsensusParamUpdates, validatorUpdates)
if err != nil {
return state, fmt.Errorf("commit failed for application: %w", err)
}
// Lock mempool, commit app state, update mempool.
appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.FinalizeBlock.Txs)
appHash, retainHeight, err := blockExec.Commit(ctx, state, block, finalizeBlockResponse.TxResults)
if err != nil {
return state, fmt.Errorf("commit failed for application: %w", err)
}
@ -282,7 +284,7 @@ func (blockExec *BlockExecutor) ApplyBlock(
// Events are fired after everything else.
// NOTE: if we crash between Commit and Save, events won't be fired during replay
fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates)
fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates)
return state, nil
}
@ -292,7 +294,7 @@ func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote
Vote: vote.ToProto(),
}
resp, err := blockExec.proxyApp.ExtendVote(ctx, req)
resp, err := blockExec.appClient.ExtendVote(ctx, req)
if err != nil {
return types.VoteExtension{}, err
}
@ -304,7 +306,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t
Vote: vote.ToProto(),
}
resp, err := blockExec.proxyApp.VerifyVoteExtension(ctx, req)
resp, err := blockExec.appClient.VerifyVoteExtension(ctx, req)
if err != nil {
return err
}
@ -326,7 +328,7 @@ func (blockExec *BlockExecutor) Commit(
ctx context.Context,
state State,
block *types.Block,
deliverTxResponses []*abci.ResponseDeliverTx,
txResults []*abci.ExecTxResult,
) ([]byte, int64, error) {
blockExec.mempool.Lock()
defer blockExec.mempool.Unlock()
@ -340,7 +342,7 @@ func (blockExec *BlockExecutor) Commit(
}
// Commit block, get hash back
res, err := blockExec.proxyApp.Commit(ctx)
res, err := blockExec.appClient.Commit(ctx)
if err != nil {
blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err)
return nil, 0, err
@ -359,63 +361,19 @@ func (blockExec *BlockExecutor) Commit(
ctx,
block.Height,
block.Txs,
deliverTxResponses,
TxPreCheck(state),
TxPostCheck(state),
txResults,
TxPreCheckForState(state),
TxPostCheckForState(state),
)
return res.Data, res.RetainHeight, err
}
//---------------------------------------------------------
// Helper functions for executing blocks and updating state
// Executes block's transactions on proxyAppConn.
// Returns a list of transaction results and updates to the validator set
func execBlockOnProxyApp(
ctx context.Context,
logger log.Logger,
proxyAppConn proxy.AppConnConsensus,
block *types.Block,
store Store,
initialHeight int64,
) (*tmstate.ABCIResponses, error) {
abciResponses := new(tmstate.ABCIResponses)
abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{}
dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs))
abciResponses.FinalizeBlock.Txs = dtxs
// Begin block
var err error
pbh := block.Header.ToProto()
if pbh == nil {
return nil, errors.New("nil header")
}
abciResponses.FinalizeBlock, err = proxyAppConn.FinalizeBlock(
ctx,
abci.RequestFinalizeBlock{
Txs: block.Txs.ToSliceOfBytes(),
Hash: block.Hash(),
Header: *pbh,
Height: block.Height,
LastCommitInfo: buildLastCommitInfo(block, store, initialHeight),
ByzantineValidators: block.Evidence.ToABCI(),
},
)
if err != nil {
logger.Error("error in proxyAppConn.FinalizeBlock", "err", err)
return nil, err
}
logger.Info("executed block", "height", block.Height)
return abciResponses, nil
}
func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.LastCommitInfo {
func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo {
if block.Height == initialHeight {
// there is no last commit for the initial height.
// return an empty value.
return abci.LastCommitInfo{}
return abci.CommitInfo{}
}
lastValSet, err := store.LoadValidators(block.Height - 1)
@ -446,7 +404,7 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a
}
}
return abci.LastCommitInfo{
return abci.CommitInfo{
Round: block.LastCommit.Round,
Votes: votes,
}
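Illustrative only (not from the diff): a small sketch of the renamed commit-info type, roughly what buildLastCommitInfo assembles for each non-initial height; valAddr is a placeholder validator address:

votes := []abci.VoteInfo{{
	Validator:       abci.Validator{Address: valAddr, Power: 10}, // placeholder validator
	SignedLastBlock: true,
}}
info := abci.CommitInfo{ // formerly abci.LastCommitInfo
	Round: block.LastCommit.Round,
	Votes: votes,
}
_ = info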
@ -477,16 +435,16 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
return nil
}
// updateState returns a new State updated according to the header and responses.
func updateState(
state State,
// Update returns a copy of state with the fields set using the arguments passed in.
func (state State) Update(
blockID types.BlockID,
header *types.Header,
abciResponses *tmstate.ABCIResponses,
resultsHash []byte,
consensusParamUpdates *tmtypes.ConsensusParams,
validatorUpdates []*types.Validator,
) (State, error) {
// Copy the valset so we can apply changes from EndBlock
// Copy the valset so we can apply changes from FinalizeBlock
// and update s.LastValidators and s.Validators.
nValSet := state.NextValidators.Copy()
@ -507,9 +465,9 @@ func updateState(
// Update the params with the latest abciResponses.
nextParams := state.ConsensusParams
lastHeightParamsChanged := state.LastHeightConsensusParamsChanged
if abciResponses.FinalizeBlock.ConsensusParamUpdates != nil {
// NOTE: must not mutate s.ConsensusParams
nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.FinalizeBlock.ConsensusParamUpdates)
if consensusParamUpdates != nil {
// NOTE: must not mutate state.ConsensusParams
nextParams = state.ConsensusParams.UpdateConsensusParams(consensusParamUpdates)
err := nextParams.ValidateConsensusParams()
if err != nil {
return state, fmt.Errorf("error updating consensus params: %w", err)
@ -538,7 +496,7 @@ func updateState(
LastHeightValidatorsChanged: lastHeightValsChanged,
ConsensusParams: nextParams,
LastHeightConsensusParamsChanged: lastHeightParamsChanged,
LastResultsHash: ABCIResponsesResultsHash(abciResponses),
LastResultsHash: resultsHash,
AppHash: nil,
}, nil
}
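For reference, a minimal sketch (not part of the diff) of calling the new State.Update method that replaces updateState; the arguments mirror the call made from ApplyBlock above, where resp is the *abci.ResponseFinalizeBlock returned by the application:

// Illustrative only, inside the state package.
newState, err := state.Update(
	blockID,
	&block.Header,
	ABCIResponsesResultsHash(&tmstate.ABCIResponses{FinalizeBlock: resp}),
	resp.ConsensusParamUpdates,
	validatorUpdates,
)
if err != nil {
	return state, fmt.Errorf("state update failed: %w", err)
}
_ = newState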
@ -552,13 +510,13 @@ func fireEvents(
eventBus types.BlockEventPublisher,
block *types.Block,
blockID types.BlockID,
abciResponses *tmstate.ABCIResponses,
finalizeBlockResponse *abci.ResponseFinalizeBlock,
validatorUpdates []*types.Validator,
) {
if err := eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{
Block: block,
BlockID: blockID,
ResultFinalizeBlock: *abciResponses.FinalizeBlock,
ResultFinalizeBlock: *finalizeBlockResponse,
}); err != nil {
logger.Error("failed publishing new block", "err", err)
}
@ -566,7 +524,7 @@ func fireEvents(
if err := eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{
Header: block.Header,
NumTxs: int64(len(block.Txs)),
ResultFinalizeBlock: *abciResponses.FinalizeBlock,
ResultFinalizeBlock: *finalizeBlockResponse,
}); err != nil {
logger.Error("failed publishing new block header", "err", err)
}
@ -583,9 +541,9 @@ func fireEvents(
}
// sanity check
if len(abciResponses.FinalizeBlock.Txs) != len(block.Data.Txs) {
if len(finalizeBlockResponse.TxResults) != len(block.Data.Txs) {
panic(fmt.Sprintf("number of TXs (%d) and ABCI TX responses (%d) do not match",
len(block.Data.Txs), len(abciResponses.FinalizeBlock.Txs)))
len(block.Data.Txs), len(finalizeBlockResponse.TxResults)))
}
for i, tx := range block.Data.Txs {
@ -594,14 +552,14 @@ func fireEvents(
Height: block.Height,
Index: uint32(i),
Tx: tx,
Result: *(abciResponses.FinalizeBlock.Txs[i]),
Result: *(finalizeBlockResponse.TxResults[i]),
},
}); err != nil {
logger.Error("failed publishing event TX", "err", err)
}
}
if len(validatorUpdates) > 0 {
if len(finalizeBlockResponse.ValidatorUpdates) > 0 {
if err := eventBus.PublishEventValidatorSetUpdates(ctx,
types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil {
logger.Error("failed publishing event", "err", err)
@ -617,30 +575,41 @@ func fireEvents(
func ExecCommitBlock(
ctx context.Context,
be *BlockExecutor,
appConnConsensus proxy.AppConnConsensus,
appConnConsensus abciclient.Client,
block *types.Block,
logger log.Logger,
store Store,
initialHeight int64,
s State,
) ([]byte, error) {
abciResponses, err := execBlockOnProxyApp(ctx, logger, appConnConsensus, block, store, initialHeight)
pbh := block.Header.ToProto()
finalizeBlockResponse, err := appConnConsensus.FinalizeBlock(
ctx,
abci.RequestFinalizeBlock{
Hash: block.Hash(),
Header: *pbh,
Txs: block.Txs.ToSliceOfBytes(),
DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight),
ByzantineValidators: block.Evidence.ToABCI(),
},
)
if err != nil {
logger.Error("failed executing block on proxy app", "height", block.Height, "err", err)
logger.Error("executing block", "err", err)
return nil, err
}
logger.Info("executed block", "height", block.Height)
// the BlockExecutor condition is used for the final block replay process.
if be != nil {
abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates
err = validateValidatorUpdates(abciValUpdates, s.ConsensusParams.Validator)
err = validateValidatorUpdates(finalizeBlockResponse.ValidatorUpdates, s.ConsensusParams.Validator)
if err != nil {
logger.Error("err", err)
logger.Error("validating validator updates", "err", err)
return nil, err
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates)
validatorUpdates, err := types.PB2TM.ValidatorUpdates(finalizeBlockResponse.ValidatorUpdates)
if err != nil {
logger.Error("err", err)
logger.Error("converting validator updates to native types", "err", err)
return nil, err
}
@ -650,7 +619,7 @@ func ExecCommitBlock(
}
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
fireEvents(ctx, be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates)
fireEvents(ctx, be.logger, be.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates)
}
// Commit block, get hash back
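A hedged sketch (not part of the diff) of the updated ExecCommitBlock call on replay paths: the third argument is now an abciclient.Client rather than a proxy.AppConnConsensus; client, logger, stateStore, and state are placeholders, and 1 is the initial height used in the tests:

appHash, err := sm.ExecCommitBlock(ctx, nil, client, block, logger, stateStore, 1, state)
if err != nil {
	return err
}
_ = appHash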


+ 88
- 84
internal/state/execution_test.go View File

@ -27,7 +27,6 @@ import (
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
@ -39,21 +38,22 @@ var (
func TestApplyBlock(t *testing.T) {
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err := proxyApp.Start(ctx)
require.NoError(t, err)
require.NoError(t, proxyApp.Start(ctx))
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
state, stateDB, _ := makeState(t, 1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(),
mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore, eventBus)
block, err := sf.MakeBlock(state, 1, new(types.Commit))
require.NoError(t, err)
@ -68,85 +68,81 @@ func TestApplyBlock(t *testing.T) {
assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated")
}
// TestBeginBlockValidators ensures we send absent validators list.
func TestBeginBlockValidators(t *testing.T) {
// TestFinalizeBlockDecidedLastCommit ensures we correctly send the DecidedLastCommit to the
// application. The test ensures that the DecidedLastCommit properly reflects
// which validators signed the preceding block.
func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.TestingLogger()
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics())
cc := abciclient.NewLocalClient(logger, app)
appClient := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
err := appClient.Start(ctx)
require.NoError(t, err)
state, stateDB, _ := makeState(t, 2, 2)
state, stateDB, privVals := makeState(t, 7, 1)
stateStore := sm.NewStore(stateDB)
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
prevBlockID := types.BlockID{Hash: prevHash, PartSetHeader: prevParts}
var (
now = tmtime.Now()
commitSig0 = types.NewCommitSigForBlock(
[]byte("Signature1"),
state.Validators.Validators[0].Address,
now,
types.VoteExtensionToSign{},
)
commitSig1 = types.NewCommitSigForBlock(
[]byte("Signature2"),
state.Validators.Validators[1].Address,
now,
types.VoteExtensionToSign{},
)
absentSig = types.NewCommitSigAbsent()
)
absentSig := types.NewCommitSigAbsent()
testCases := []struct {
desc string
lastCommitSigs []types.CommitSig
expectedAbsentValidators []int
name string
absentCommitSigs map[int]bool
}{
{"none absent", []types.CommitSig{commitSig0, commitSig1}, []int{}},
{"one absent", []types.CommitSig{commitSig0, absentSig}, []int{1}},
{"multiple absent", []types.CommitSig{absentSig, absentSig}, []int{0, 1}},
{"none absent", map[int]bool{}},
{"one absent", map[int]bool{1: true}},
{"multiple absent", map[int]bool{1: true, 3: true}},
}
for _, tc := range testCases {
lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs)
t.Run(tc.name, func(t *testing.T) {
blockStore := store.NewBlockStore(dbm.NewMemDB())
evpool := &mocks.EvidencePool{}
evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, 0)
evpool.On("Update", ctx, mock.Anything, mock.Anything).Return()
evpool.On("CheckEvidence", ctx, mock.Anything).Return(nil)
// block for height 2
block, err := sf.MakeBlock(state, 2, lastCommit)
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
_, err = sm.ExecCommitBlock(ctx, nil, proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, state)
require.NoError(t, err, tc.desc)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), appClient, mmock.Mempool{}, evpool, blockStore, eventBus)
state, _, lastCommit := makeAndCommitGoodBlock(ctx, t, state, 1, new(types.Commit), state.NextValidators.Validators[0].Address, blockExec, privVals, nil)
// -> app receives a list of validators with a bool indicating if they signed
ctr := 0
for i, v := range app.CommitVotes {
if ctr < len(tc.expectedAbsentValidators) &&
tc.expectedAbsentValidators[ctr] == i {
for idx, isAbsent := range tc.absentCommitSigs {
if isAbsent {
lastCommit.Signatures[idx] = absentSig
}
}
assert.False(t, v.SignedLastBlock)
ctr++
} else {
assert.True(t, v.SignedLastBlock)
// block for height 2
block, err := sf.MakeBlock(state, 2, lastCommit)
require.NoError(t, err)
bps, err := block.MakePartSet(testPartSize)
require.NoError(t, err)
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
_, err = blockExec.ApplyBlock(ctx, state, blockID, block)
require.NoError(t, err)
// -> app receives a list of validators with a bool indicating if they signed
for i, v := range app.CommitVotes {
_, absent := tc.absentCommitSigs[i]
assert.Equal(t, !absent, v.SignedLastBlock)
}
}
})
}
}
// TestBeginBlockByzantineValidators ensures we send byzantine validators list.
func TestBeginBlockByzantineValidators(t *testing.T) {
// TestFinalizeBlockByzantineValidators ensures we send byzantine validators list.
func TestFinalizeBlockByzantineValidators(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics())
logger := log.TestingLogger()
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
@ -220,10 +216,13 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
evpool.On("Update", ctx, mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()
evpool.On("CheckEvidence", ctx, mock.AnythingOfType("types.EvidenceList")).Return(nil)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mmock.Mempool{}, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp,
mmock.Mempool{}, evpool, blockStore, eventBus)
block, err := sf.MakeBlock(state, 1, new(types.Commit))
require.NoError(t, err)
@ -248,9 +247,9 @@ func TestProcessProposal(t *testing.T) {
defer cancel()
app := abcimocks.NewBaseMock()
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
@ -258,13 +257,17 @@ func TestProcessProposal(t *testing.T) {
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(
stateStore,
logger,
proxyApp.Consensus(),
proxyApp,
mmock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
eventBus,
)
block0, err := sf.MakeBlock(state, height-1, new(types.Commit))
@ -301,7 +304,7 @@ func TestProcessProposal(t *testing.T) {
Header: *block1.Header.ToProto(),
Txs: block1.Txs.ToSliceOfBytes(),
ByzantineValidators: block1.Evidence.ToABCI(),
LastCommitInfo: abci.LastCommitInfo{
ProposedLastCommit: abci.CommitInfo{
Round: 0,
Votes: voteInfos,
},
@ -445,15 +448,15 @@ func TestUpdateValidators(t *testing.T) {
}
}
// TestEndBlockValidatorUpdates ensures we update validator set and send an event.
func TestEndBlockValidatorUpdates(t *testing.T) {
// TestFinalizeBlockValidatorUpdates ensures we update validator set and send an event.
func TestFinalizeBlockValidatorUpdates(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
@ -461,24 +464,21 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
blockExec := sm.NewBlockExecutor(
stateStore,
logger,
proxyApp.Consensus(),
proxyApp,
mmock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
eventBus,
)
eventBus := eventbus.NewDefault(logger)
err = eventBus.Start(ctx)
require.NoError(t, err)
defer eventBus.Stop()
blockExec.SetEventBus(eventBus)
updatesSub, err := eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
ClientID: "TestEndBlockValidatorUpdates",
ClientID: "TestFinalizeBlockValidatorUpdates",
Query: types.EventQueryValidatorSetUpdates,
})
require.NoError(t, err)
@ -519,29 +519,33 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
}
}
// TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that
// TestFinalizeBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that
// would result in an empty set causes no panic; an error is raised and NextValidators is not updated
func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
logger := log.TestingLogger()
proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
cc := abciclient.NewLocalClient(logger, app)
proxyApp := proxy.New(cc, logger, proxy.NopMetrics())
err := proxyApp.Start(ctx)
require.NoError(t, err)
eventBus := eventbus.NewDefault(logger)
require.NoError(t, eventBus.Start(ctx))
state, stateDB, _ := makeState(t, 1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
proxyApp.Consensus(),
proxyApp,
mmock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
eventBus,
)
block, err := sf.MakeBlock(state, 1, new(types.Commit))


+ 0
- 24
internal/state/export_test.go View File

@ -2,33 +2,9 @@ package state
import (
abci "github.com/tendermint/tendermint/abci/types"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/types"
)
//
// TODO: Remove dependence on all entities exported from this file.
//
// Every entity exported here is dependent on a private entity from the `state`
// package. Currently, these functions are only made available to tests in the
// `state_test` package, but we should not be relying on them for our testing.
// Instead, we should be exclusively relying on exported entities for our
// testing, and should be refactoring exported entities to make them more
// easily testable from outside of the package.
//
// UpdateState is an alias for updateState exported from execution.go,
// exclusively and explicitly for testing.
func UpdateState(
state State,
blockID types.BlockID,
header *types.Header,
abciResponses *tmstate.ABCIResponses,
validatorUpdates []*types.Validator,
) (State, error) {
return updateState(state, blockID, header, abciResponses, validatorUpdates)
}
// ValidateValidatorUpdates is an alias for validateValidatorUpdates exported
// from execution.go, exclusively and explicitly for testing.
func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error {


+ 9
- 21
internal/state/helpers_test.go View File

@ -11,16 +11,13 @@ import (
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/proxy"
sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
@ -33,12 +30,6 @@ type paramsChangeTestCase struct {
params types.ConsensusParams
}
func newTestApp() proxy.AppConns {
app := &testApp{}
cc := abciclient.NewLocalCreator(app)
return proxy.NewAppConns(cc, log.NewNopLogger(), proxy.NopMetrics())
}
func makeAndCommitGoodBlock(
ctx context.Context,
t *testing.T,
@ -155,9 +146,7 @@ func makeHeaderPartsResponsesValPubKeyChange(
block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
require.NoError(t, err)
abciResponses := &tmstate.ABCIResponses{
FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil},
}
abciResponses := &tmstate.ABCIResponses{}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
@ -187,10 +176,9 @@ func makeHeaderPartsResponsesValPowerChange(
block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
require.NoError(t, err)
abciResponses := &tmstate.ABCIResponses{
FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil},
}
abciResponses := &tmstate.ABCIResponses{}
abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if val.VotingPower != power {
@ -296,15 +284,15 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
}
func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
app.CommitVotes = req.LastCommitInfo.Votes
app.CommitVotes = req.DecidedLastCommit.Votes
app.ByzantineValidators = req.ByzantineValidators
resTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
resTxs := make([]*abci.ExecTxResult, len(req.Txs))
for i, tx := range req.Txs {
if len(tx) > 0 {
resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK}
} else {
resTxs[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK + 10} // error
resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK + 10} // error
}
}
@ -315,8 +303,8 @@ func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFi
AppVersion: 1,
},
},
Events: []abci.Event{},
Txs: resTxs,
Events: []abci.Event{},
TxResults: resTxs,
}
}


+ 10
- 11
internal/state/indexer/block/kv/kv.go View File

@ -20,7 +20,7 @@ import (
var _ indexer.BlockIndexer = (*BlockerIndexer)(nil)
// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock
// BlockerIndexer implements a block indexer, indexing FinalizeBlock
// events with an underlying KV store. Block events are indexed by their height,
// such that matching search criteria returns the respective block height(s).
type BlockerIndexer struct {
@ -44,12 +44,11 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) {
return idx.store.Has(key)
}
// Index indexes BeginBlock and EndBlock events for a given block by its height.
// Index indexes FinalizeBlock events for a given block by its height.
// The following is indexed:
//
// primary key: encode(block.height | height) => encode(height)
// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height)
// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height)
// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block) => encode(height)
func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error {
batch := idx.store.NewBatch()
defer batch.Close()
@ -65,19 +64,19 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error {
return err
}
// 2. index BeginBlock events
if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil {
// 2. index FinalizeBlock events
if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, types.EventTypeFinalizeBlock, height); err != nil {
return fmt.Errorf("failed to index FinalizeBlock events: %w", err)
}
return batch.WriteSync()
}
// Search performs a query for block heights that match a given BeginBlock
// and Endblock event search criteria. The given query can match against zero,
// one or more block heights. In the case of height queries, i.e. block.height=H,
// if the height is indexed, that height alone will be returned. An error and
// nil slice is returned. Otherwise, a non-nil slice and nil error is returned.
// Search performs a query for block heights that match the given FinalizeBlock
// event search criteria. The given query can match against zero or more block
// heights. In the case of height queries, i.e. block.height=H, if the height is
// indexed, that height alone will be returned. On failure, a nil slice and an
// error are returned; otherwise, a non-nil slice and a nil error are returned.
func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) {
results := make([]int64, 0)
select {


+ 6
- 6
internal/state/indexer/block/kv/kv_test.go View File

@ -92,19 +92,19 @@ func TestBlockIndexer(t *testing.T) {
q: query.MustCompile(`block.height = 5`),
results: []int64{5},
},
"begin_event.key1 = 'value1'": {
"finalize_event.key1 = 'value1'": {
q: query.MustCompile(`finalize_event1.key1 = 'value1'`),
results: []int64{},
},
"begin_event.proposer = 'FCAA001'": {
"finalize_event.proposer = 'FCAA001'": {
q: query.MustCompile(`finalize_event1.proposer = 'FCAA001'`),
results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
},
"end_event.foo <= 5": {
"finalize_event.foo <= 5": {
q: query.MustCompile(`finalize_event2.foo <= 5`),
results: []int64{2, 4},
},
"end_event.foo >= 100": {
"finalize_event.foo >= 100": {
q: query.MustCompile(`finalize_event2.foo >= 100`),
results: []int64{1},
},
@ -112,11 +112,11 @@ func TestBlockIndexer(t *testing.T) {
q: query.MustCompile(`block.height > 2 AND finalize_event2.foo <= 8`),
results: []int64{4, 6, 8},
},
"begin_event.proposer CONTAINS 'FFFFFFF'": {
"finalize_event.proposer CONTAINS 'FFFFFFF'": {
q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FFFFFFF'`),
results: []int64{},
},
"begin_event.proposer CONTAINS 'FCAA001'": {
"finalize_event.proposer CONTAINS 'FCAA001'": {
q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FCAA001'`),
results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
},
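Not part of the diff: a small usage sketch tying the renamed test cases to the indexer API; idx is assumed to be a *kv.BlockerIndexer (or any indexer.BlockIndexer), and the heights returned by Search are those whose FinalizeBlock events match the compiled query:

q := query.MustCompile(`finalize_event1.proposer = 'FCAA001'`)
heights, err := idx.Search(ctx, q)
if err != nil {
	return err
}
for _, h := range heights {
	fmt.Println("matching block height:", h)
}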


+ 3
- 3
internal/state/indexer/indexer.go View File

@ -30,11 +30,11 @@ type BlockIndexer interface {
// upon database query failure.
Has(height int64) (bool, error)
// Index indexes BeginBlock and EndBlock events for a given block by its height.
// Index indexes FinalizeBlock events for a given block by its height.
Index(types.EventDataNewBlockHeader) error
// Search performs a query for block heights that match a given BeginBlock
// and Endblock event search criteria.
// Search performs a query for block heights that match a given FinalizeBlock
// event search criteria.
Search(ctx context.Context, q *query.Query) ([]int64, error)
}


+ 2
- 2
internal/state/indexer/indexer_service_test.go View File

@ -80,7 +80,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
Height: 1,
Index: uint32(0),
Tx: types.Tx("foo"),
Result: abci.ResponseDeliverTx{Code: 0},
Result: abci.ExecTxResult{Code: 0},
}
err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult1})
require.NoError(t, err)
@ -88,7 +88,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
Height: 1,
Index: uint32(1),
Tx: types.Tx("bar"),
Result: abci.ResponseDeliverTx{Code: 0},
Result: abci.ExecTxResult{Code: 0},
}
err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult2})
require.NoError(t, err)


+ 1
- 1
internal/state/indexer/sink/kv/kv_test.go View File

@ -338,7 +338,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult {
Height: 1,
Index: 0,
Tx: tx,
Result: abci.ResponseDeliverTx{
Result: abci.ExecTxResult{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",


+ 2
- 22
internal/state/indexer/sink/psql/psql_test.go View File

@ -46,8 +46,7 @@ const (
dbName = "postgres"
chainID = "test-chainID"
viewBlockEvents = "block_events"
viewTxEvents = "tx_events"
viewTxEvents = "tx_events"
)
func TestMain(m *testing.M) {
@ -266,7 +265,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult {
Height: 1,
Index: 0,
Tx: types.Tx("HELLO WORLD"),
Result: abci.ResponseDeliverTx{
Result: abci.ExecTxResult{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
@ -309,25 +308,6 @@ SELECT height FROM `+tableBlocks+` WHERE height = $1;
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
// Verify the presence of begin_block and end_block events.
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %c", err)
}
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
}
// verifyNotImplemented calls f and verifies that it returns both a


+ 1
- 1
internal/state/indexer/tx/kv/kv_bench_test.go View File

@ -43,7 +43,7 @@ func BenchmarkTxSearch(b *testing.B) {
Height: int64(i),
Index: 0,
Tx: types.Tx(string(txBz)),
Result: abci.ResponseDeliverTx{
Result: abci.ExecTxResult{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",


Some files were not shown because too many files changed in this diff
