Merge pull request #3695 from tendermint/release/v0.31.6

Release/v0.31.6
pull/3697/head v0.31.6
Ethan Buchman, 5 years ago (committed by GitHub)
commit 77e711f70b
119 changed files with 5584 additions and 1965 deletions
1. +8 -1 .github/PULL_REQUEST_TEMPLATE.md
2. +70 -0 CHANGELOG.md
3. +1 -1 CHANGELOG_PENDING.md
4. +21 -17 CONTRIBUTING.md
5. +15 -2 Gopkg.lock
6. +4 -0 Gopkg.toml
7. +3 -3 Makefile
8. +10 -0 UPGRADING.md
9. +43 -43 abci/types/types.pb.go
10. +1 -1 abci/types/types.proto
11. +5 -2 blockchain/reactor_test.go
12. +5 -5 blockchain/store.go
13. +11 -1 cmd/tendermint/commands/reset_priv_validator.go
14. +70 -15 cmd/tendermint/commands/testnet.go
15. +13 -2 config/config.go
16. +15 -4 config/toml.go
17. +4 -1 consensus/byzantine_test.go
18. +42 -11 consensus/common_test.go
19. +10 -4 consensus/mempool_test.go
20. +16 -8 consensus/reactor.go
21. +51 -2 consensus/reactor_test.go
22. +51 -21 consensus/replay.go
23. +2 -1 consensus/replay_file.go
24. +482 -66 consensus/replay_test.go
25. +15 -24 consensus/state.go
26. +38 -33 consensus/state_test.go
27. +3 -5 consensus/types/height_vote_set.go
28. +5 -2 consensus/wal_generator.go
29. +71 -0 crypto/merkle/simple_tree.go
30. +36 -0 crypto/merkle/simple_tree_test.go
31. +11 -4 crypto/multisig/multisignature.go
32. +56 -18 crypto/multisig/threshold_pubkey_test.go
33. +2 -3 crypto/xsalsa20symmetric/symmetric.go
34. +1 -1 docs/app-dev/subscribing-to-events-via-websocket.md
35. +40 -20 docs/architecture/adr-025-commit.md
36. +100 -0 docs/architecture/adr-037-deliver-block.md
37. +534 -0 docs/architecture/adr-040-blockchain-reactor-refactor.md
38. +29 -0 docs/architecture/adr-041-proposer-selection-via-abci.md
39. BIN docs/architecture/img/bc-reactor-refactor.png
40. BIN docs/architecture/img/bc-reactor.png
41. +5 -7 docs/introduction/install.md
42. +9 -7 docs/networks/terraform-and-ansible.md
43. +1 -1 docs/spec/abci/apps.md
44. +6 -6 docs/spec/p2p/config.md
45. +11 -8 docs/spec/reactors/pex/pex.md
46. +13 -2 docs/tendermint-core/configuration.md
47. +1 -1 docs/tendermint-core/running-in-production.md
48. +0 -5 evidence/reactor.go
49. +1 -1 libs/README.md
50. +3 -1 libs/clist/clist_test.go
51. +0 -32 libs/common/errors.go
52. +1 -1 libs/common/random.go
53. +1 -2 libs/common/service.go
54. +1 -0 libs/db/backend_test.go
55. +353 -0 libs/db/boltdb.go
56. +37 -0 libs/db/boltdb_test.go
57. +1 -1 libs/db/c_level_db.go
58. +3 -3 libs/db/c_level_db_test.go
59. +66 -0 libs/db/common_test.go
60. +27 -7 libs/db/db.go
61. +1 -1 libs/db/fsdb.go
62. +4 -6 libs/db/go_level_db.go
63. +17 -78 libs/db/go_level_db_test.go
64. +1 -1 libs/db/mem_db.go
65. +1 -1 libs/db/remotedb/remotedb_test.go
66. +5 -0 libs/db/util_test.go
67. +5 -2 libs/pubsub/pubsub_test.go
68. +88 -13 lite/proxy/proxy.go
69. +1 -1 lite/proxy/verifier.go
70. +2 -1 mempool/cache_test.go
71. +712 -0 mempool/clist_mempool.go
72. +50 -23 mempool/clist_mempool_test.go
73. +24 -0 mempool/doc.go
74. +46 -0 mempool/errors.go
75. +69 -793 mempool/mempool.go
76. +22 -22 mempool/reactor.go
77. +23 -24 mempool/reactor_test.go
78. +46 -0 mock/mempool.go
79. +4 -2 networks/remote/integration.sh
80. +319 -180 node/node.go
81. +1 -1 node/node_test.go
82. +21 -3 p2p/base_reactor.go
83. +1 -1 p2p/conn/connection.go
84. +6 -0 p2p/conn/secret_connection.go
85. +1 -3 p2p/netaddress.go
86. +92 -0 p2p/peer_behaviour.go
87. +180 -0 p2p/peer_behaviour_test.go
88. +2 -2 p2p/pex/file.go
89. +80 -49 p2p/pex/pex_reactor.go
90. +73 -5 p2p/pex/pex_reactor_test.go
91. +104 -36 p2p/switch.go
92. +138 -40 p2p/switch_test.go
93. +1 -1 p2p/test_util.go
94. +22 -6 p2p/transport.go
95. +1 -1 p2p/trust/store.go
96. +14 -3 privval/socket_listeners_test.go
97. +126 -0 rpc/client/examples_test.go
98. +1 -1 rpc/client/helpers.go
99. +135 -52 rpc/client/httpclient.go
100. +105 -4 rpc/client/rpc_test.go

.github/PULL_REQUEST_TEMPLATE.md (+8 -1)

@ -1,5 +1,12 @@
<!-- Thanks for filing a PR! Before hitting the button, please check the following items.-->
<!--
Thanks for filing a PR! Before hitting the button, please check the following items.
Please note that every non-trivial PR must reference an issue that explains the
changes in the PR.
-->
* [ ] Referenced an issue explaining the need for the change
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests


CHANGELOG.md (+70 -0)

@ -1,5 +1,75 @@
# Changelog
## v0.31.6
*May 31st, 2019*
This release contains many fixes and improvements, primarily for p2p functionality.
It also fixes a security issue in the mempool package.
With this release, Tendermint now supports [boltdb](https://github.com/etcd-io/bbolt), although
in experimental mode. Feel free to try it and report any findings or issues to us.
Note also that the build tags for compiling CLevelDB have changed.
Special thanks to external contributors on this release:
@guagualvcha, @james-ray, @gregdhill, @climber73, @yutianwu,
@carlosflrs, @defunctzombie, @leoluk, @needkane, @CrocdileChan
### BREAKING CHANGES:
* Go API
- [libs/common] Removed deprecated `PanicSanity`, `PanicCrisis`,
`PanicConsensus` and `PanicQ`
- [mempool, state] [\#2659](https://github.com/tendermint/tendermint/issues/2659) `Mempool` now an interface that lives in the mempool package.
See issue and PR for more details.
- [p2p] [\#3346](https://github.com/tendermint/tendermint/issues/3346) `Reactor#InitPeer` method is added to `Reactor` interface
- [types] [\#1648](https://github.com/tendermint/tendermint/issues/1648) `Commit#VoteSignBytes` signature was changed
### FEATURES:
- [node] [\#2659](https://github.com/tendermint/tendermint/issues/2659) Add `node.Mempool()` method, which allows you to access mempool
- [libs/db] [\#3604](https://github.com/tendermint/tendermint/pull/3604) Add experimental support for bolt db (etcd's fork of bolt) (@CrocdileChan)
### IMPROVEMENTS:
- [cli] [\#3585](https://github.com/tendermint/tendermint/issues/3585) Add `--keep-addr-book` option to `unsafe_reset_all` cmd to not
clear the address book (@climber73)
- [cli] [\#3160](https://github.com/tendermint/tendermint/issues/3160) Add
`--config=<path-to-config>` option to `testnet` cmd (@gregdhill)
- [cli] [\#3661](https://github.com/tendermint/tendermint/pull/3661) Add
`--hostname-suffix`, `--hostname` and `--random-monikers` options to `testnet`
cmd for greater peer address/identity generation flexibility.
- [crypto] [\#3672](https://github.com/tendermint/tendermint/issues/3672) Return more info in the `AddSignatureFromPubKey` error
- [cs/replay] [\#3460](https://github.com/tendermint/tendermint/issues/3460) Check appHash for each block
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
* Use `boltdb` tag to compile Tendermint with bolt db
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
when IP lookup fails)
- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer (@guagualvcha)
- [p2p] [\#3531](https://github.com/tendermint/tendermint/issues/3531) Terminate session on nonce wrapping (@climber73)
- [pex] [\#3647](https://github.com/tendermint/tendermint/pull/3647) Dial seeds, if any, instead of crawling peers first (@defunctzombie)
- [rpc] [\#3534](https://github.com/tendermint/tendermint/pull/3534) Add support for batched requests/responses in JSON RPC
- [rpc] [\#3362](https://github.com/tendermint/tendermint/issues/3362) `/dial_seeds` & `/dial_peers` return errors if addresses are
incorrect (except when IP lookup fails)
### BUG FIXES:
- [consensus] [\#3067](https://github.com/tendermint/tendermint/issues/3067) Fix replay from appHeight==0 with validator set changes (@james-ray)
- [consensus] [\#3304](https://github.com/tendermint/tendermint/issues/3304) Create a peer state in consensus reactor before the peer
is started (@guagualvcha)
- [lite] [\#3669](https://github.com/tendermint/tendermint/issues/3669) Add context parameter to RPC Handlers in proxy routes (@yutianwu)
- [mempool] [\#3322](https://github.com/tendermint/tendermint/issues/3322) When a block is committed, only remove committed txs from the mempool
that were valid (ie. `ResponseDeliverTx.Code == 0`)
- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Ensure `RemovePeer` is always called before `InitPeer` (upon a peer
reconnecting to our node)
- [p2p] [\#3532](https://github.com/tendermint/tendermint/issues/3532) Limit the number of attempts to connect to a peer in seed mode
to 16 (as a result, the node will stop retrying after a 35-hour time window)
- [p2p] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Allow inbound peers to be persistent, including for seed nodes.
- [pex] [\#3603](https://github.com/tendermint/tendermint/pull/3603) Dial seeds when addrbook needs more addresses (@defunctzombie)
### OTHERS:
- [networks] fixes ansible integration script (@carlosflrs)
## v0.31.5
*April 16th, 2019*
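
The new `node.Mempool()` accessor and the `Mempool` interface mentioned above can be exercised roughly as follows. This is a minimal, hypothetical sketch (not part of the release): `submitTx` is an invented helper, and it assumes the interface keeps the familiar `CheckTx(tx, callback)` and `Size()` methods.

```go
package main

import (
	"log"

	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/types"
)

// submitTx pushes a raw transaction straight into a running node's mempool
// via the accessor added in this release (assumed to return mempl.Mempool).
func submitTx(n *node.Node, tx []byte) error {
	var mp mempl.Mempool = n.Mempool()
	if err := mp.CheckTx(types.Tx(tx), nil); err != nil {
		return err
	}
	log.Printf("mempool now holds %d txs", mp.Size())
	return nil
}
```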


CHANGELOG_PENDING.md (+1 -1)

@ -1,4 +1,4 @@
## v0.31.6
## v0.31.7
**


CONTRIBUTING.md (+21 -17)

@ -4,6 +4,14 @@ Thank you for considering making contributions to Tendermint and related reposit
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
Before making a pull request, please open an issue describing the
change you would like to make. If an issue for your change already exists,
please comment on it that you will submit a pull request. Be sure to reference the issue in the opening
comment of your pull request. If your change is substantial, you will be asked
to write a more detailed design document in the form of an
Architectural Decision Record (ie. see [here](./docs/architecture/)) before submitting code
changes.
Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.
## Forking
@ -105,10 +113,14 @@ removed from the header in rpc responses as well.
## Branching Model and Release
All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
We follow a variant of [git flow](http://nvie.com/posts/a-successful-git-branching-model/).
This means that all pull-requests should be made against develop. Any merge to
master constitutes a tagged release.
Note all pull requests should be squash merged except for merging to master and
merging master back to develop. This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.
### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
@ -120,13 +132,13 @@ master constitutes a tagged release.
### Pull Merge Procedure:
- ensure pull branch is based on a recent develop
- run `make test` to ensure that all tests pass
- merge pull request
- squash merge pull request
- the `unstable` branch may be used to aggregate pull merges before fixing tests
### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare changelog:
- prepare release in a pull request against develop (to be squash merged):
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
@ -135,23 +147,15 @@ master constitutes a tagged release.
the changelog. To lookup an alias from an email, try `bash
./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump versions
- push to release/vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop
- bump versions
- push latest develop with prepared release details to release/vX.X.X to run the extended integration tests on the CI
- if necessary, make pull requests against release/vX.X.X and squash merge them
- merge to master (don't squash merge!)
- merge master back to develop (don't squash merge!)
### Hotfix Procedure:
- start on `master`
- checkout a new branch named hotfix-vX.X.X
- make the required changes
- these changes should be small and an absolute necessity
- add a note to CHANGELOG.md
- bump versions
- push to hotfix-vX.X.X to run the extended integration tests on the CI
- merge hotfix-vX.X.X to master
- merge hotfix-vX.X.X to develop
- delete the hotfix-vX.X.X branch
- follow the normal development and release procedure without any differences
## Testing


Gopkg.lock (+15 -2)

@ -34,6 +34,14 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:5f7414cf41466d4b4dd7ec52b2cd3e481e08cfd11e7e24fef730c0e483e88bb1"
name = "github.com/etcd-io/bbolt"
packages = ["."]
pruneopts = "UT"
revision = "63597a96ec0ad9e6d43c3fc81e809909e0237461"
version = "v1.3.2"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
@ -170,9 +178,12 @@
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
digest = "1:53e8c5c79716437e601696140e8b1801aae4204f4ec54a504333702a49572c4f"
name = "github.com/magiconair/properties"
packages = ["."]
packages = [
".",
"assert",
]
pruneopts = "UT"
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
@ -500,6 +511,7 @@
"github.com/btcsuite/btcd/btcec",
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/etcd-io/bbolt",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
@ -516,6 +528,7 @@
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/gorilla/websocket",
"github.com/jmhodges/levigo",
"github.com/magiconair/properties/assert",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",


Gopkg.toml (+4 -0)

@ -22,6 +22,10 @@
###########################################################
# Allow only patch releases for serialization libraries
[[constraint]]
name = "github.com/etcd-io/bbolt"
version = "v1.3.2"
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "~0.14.1"


Makefile (+3 -3)

@ -23,7 +23,7 @@ build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/
build_c:
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/
build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint
@ -32,7 +32,7 @@ install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
install_c:
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint
########################################
### Protobuf
@ -132,7 +132,7 @@ clean_certs:
rm -f db/remotedb/::.crt db/remotedb/::.key
test_libs: gen_certs
go test -tags gcc $(PACKAGES)
go test -tags clevedb boltdb $(PACKAGES)
make clean_certs
grpc_dbserver:


UPGRADING.md (+10 -0)

@ -3,6 +3,16 @@
This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.
## v0.31.6
There are no breaking changes in this release except the Go API of the p2p and
mempool packages. However, if you're using cleveldb, you'll need to change
the compilation tag:
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
## v0.31.0
This release contains a breaking change to the behaviour of the pubsub system.
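
For the v0.31.6 note above about the changed build tags: Go build tags gate entire files, so backend-specific code is compiled in only when the matching tag is passed. A minimal illustrative file (not part of the release) guarded by the `boltdb` tag could look like this; the same `// +build` pattern applies to `cleveldb`.

```go
// +build boltdb

// Compiled only when building with `go build -tags boltdb`
// (or `-tags "cleveldb boltdb"` to enable both optional backends).
package example

import (
	bolt "github.com/etcd-io/bbolt"
)

// openBolt opens a bbolt database file; it exists only in boltdb-tagged builds.
func openBolt(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0600, nil)
}
```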


abci/types/types.pb.go (+43 -43)

@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{0}
return fileDescriptor_types_7e896a7c04915591, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} }
func (m *RequestEcho) String() string { return proto.CompactTextString(m) }
func (*RequestEcho) ProtoMessage() {}
func (*RequestEcho) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{1}
return fileDescriptor_types_7e896a7c04915591, []int{1}
}
func (m *RequestEcho) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} }
func (m *RequestFlush) String() string { return proto.CompactTextString(m) }
func (*RequestFlush) ProtoMessage() {}
func (*RequestFlush) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{2}
return fileDescriptor_types_7e896a7c04915591, []int{2}
}
func (m *RequestFlush) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -571,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} }
func (m *RequestInfo) String() string { return proto.CompactTextString(m) }
func (*RequestInfo) ProtoMessage() {}
func (*RequestInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{3}
return fileDescriptor_types_7e896a7c04915591, []int{3}
}
func (m *RequestInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -634,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} }
func (m *RequestSetOption) String() string { return proto.CompactTextString(m) }
func (*RequestSetOption) ProtoMessage() {}
func (*RequestSetOption) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{4}
return fileDescriptor_types_7e896a7c04915591, []int{4}
}
func (m *RequestSetOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -692,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} }
func (m *RequestInitChain) String() string { return proto.CompactTextString(m) }
func (*RequestInitChain) ProtoMessage() {}
func (*RequestInitChain) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{5}
return fileDescriptor_types_7e896a7c04915591, []int{5}
}
func (m *RequestInitChain) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -770,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} }
func (m *RequestQuery) String() string { return proto.CompactTextString(m) }
func (*RequestQuery) ProtoMessage() {}
func (*RequestQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{6}
return fileDescriptor_types_7e896a7c04915591, []int{6}
}
func (m *RequestQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -841,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} }
func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) }
func (*RequestBeginBlock) ProtoMessage() {}
func (*RequestBeginBlock) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{7}
return fileDescriptor_types_7e896a7c04915591, []int{7}
}
func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -909,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} }
func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) }
func (*RequestCheckTx) ProtoMessage() {}
func (*RequestCheckTx) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{8}
return fileDescriptor_types_7e896a7c04915591, []int{8}
}
func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -956,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} }
func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) }
func (*RequestDeliverTx) ProtoMessage() {}
func (*RequestDeliverTx) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{9}
return fileDescriptor_types_7e896a7c04915591, []int{9}
}
func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1003,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} }
func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) }
func (*RequestEndBlock) ProtoMessage() {}
func (*RequestEndBlock) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{10}
return fileDescriptor_types_7e896a7c04915591, []int{10}
}
func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1049,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} }
func (m *RequestCommit) String() string { return proto.CompactTextString(m) }
func (*RequestCommit) ProtoMessage() {}
func (*RequestCommit) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{11}
return fileDescriptor_types_7e896a7c04915591, []int{11}
}
func (m *RequestCommit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1102,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{12}
return fileDescriptor_types_7e896a7c04915591, []int{12}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1555,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} }
func (m *ResponseException) String() string { return proto.CompactTextString(m) }
func (*ResponseException) ProtoMessage() {}
func (*ResponseException) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{13}
return fileDescriptor_types_7e896a7c04915591, []int{13}
}
func (m *ResponseException) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1602,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} }
func (m *ResponseEcho) String() string { return proto.CompactTextString(m) }
func (*ResponseEcho) ProtoMessage() {}
func (*ResponseEcho) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{14}
return fileDescriptor_types_7e896a7c04915591, []int{14}
}
func (m *ResponseEcho) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1648,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} }
func (m *ResponseFlush) String() string { return proto.CompactTextString(m) }
func (*ResponseFlush) ProtoMessage() {}
func (*ResponseFlush) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{15}
return fileDescriptor_types_7e896a7c04915591, []int{15}
}
func (m *ResponseFlush) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1692,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} }
func (m *ResponseInfo) String() string { return proto.CompactTextString(m) }
func (*ResponseInfo) ProtoMessage() {}
func (*ResponseInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{16}
return fileDescriptor_types_7e896a7c04915591, []int{16}
}
func (m *ResponseInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1771,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} }
func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) }
func (*ResponseSetOption) ProtoMessage() {}
func (*ResponseSetOption) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{17}
return fileDescriptor_types_7e896a7c04915591, []int{17}
}
func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1833,7 +1833,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} }
func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) }
func (*ResponseInitChain) ProtoMessage() {}
func (*ResponseInitChain) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{18}
return fileDescriptor_types_7e896a7c04915591, []int{18}
}
func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1896,7 +1896,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} }
func (m *ResponseQuery) String() string { return proto.CompactTextString(m) }
func (*ResponseQuery) ProtoMessage() {}
func (*ResponseQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{19}
return fileDescriptor_types_7e896a7c04915591, []int{19}
}
func (m *ResponseQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -1999,7 +1999,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} }
func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) }
func (*ResponseBeginBlock) ProtoMessage() {}
func (*ResponseBeginBlock) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{20}
return fileDescriptor_types_7e896a7c04915591, []int{20}
}
func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2053,7 +2053,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) }
func (*ResponseCheckTx) ProtoMessage() {}
func (*ResponseCheckTx) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{21}
return fileDescriptor_types_7e896a7c04915591, []int{21}
}
func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2156,7 +2156,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} }
func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) }
func (*ResponseDeliverTx) ProtoMessage() {}
func (*ResponseDeliverTx) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{22}
return fileDescriptor_types_7e896a7c04915591, []int{22}
}
func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2254,7 +2254,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} }
func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) }
func (*ResponseEndBlock) ProtoMessage() {}
func (*ResponseEndBlock) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{23}
return fileDescriptor_types_7e896a7c04915591, []int{23}
}
func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2316,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} }
func (m *ResponseCommit) String() string { return proto.CompactTextString(m) }
func (*ResponseCommit) ProtoMessage() {}
func (*ResponseCommit) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{24}
return fileDescriptor_types_7e896a7c04915591, []int{24}
}
func (m *ResponseCommit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2367,7 +2367,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} }
func (m *ConsensusParams) String() string { return proto.CompactTextString(m) }
func (*ConsensusParams) ProtoMessage() {}
func (*ConsensusParams) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{25}
return fileDescriptor_types_7e896a7c04915591, []int{25}
}
func (m *ConsensusParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2417,7 +2417,7 @@ func (m *ConsensusParams) GetValidator() *ValidatorParams {
return nil
}
// BlockParams contains limits on the block size and timestamp.
// BlockParams contains limits on the block size.
type BlockParams struct {
// Note: must be greater than 0
MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"`
@ -2432,7 +2432,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} }
func (m *BlockParams) String() string { return proto.CompactTextString(m) }
func (*BlockParams) ProtoMessage() {}
func (*BlockParams) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{26}
return fileDescriptor_types_7e896a7c04915591, []int{26}
}
func (m *BlockParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2488,7 +2488,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} }
func (m *EvidenceParams) String() string { return proto.CompactTextString(m) }
func (*EvidenceParams) ProtoMessage() {}
func (*EvidenceParams) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{27}
return fileDescriptor_types_7e896a7c04915591, []int{27}
}
func (m *EvidenceParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2536,7 +2536,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} }
func (m *ValidatorParams) String() string { return proto.CompactTextString(m) }
func (*ValidatorParams) ProtoMessage() {}
func (*ValidatorParams) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{28}
return fileDescriptor_types_7e896a7c04915591, []int{28}
}
func (m *ValidatorParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2584,7 +2584,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} }
func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) }
func (*LastCommitInfo) ProtoMessage() {}
func (*LastCommitInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{29}
return fileDescriptor_types_7e896a7c04915591, []int{29}
}
func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2658,7 +2658,7 @@ func (m *Header) Reset() { *m = Header{} }
func (m *Header) String() string { return proto.CompactTextString(m) }
func (*Header) ProtoMessage() {}
func (*Header) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{30}
return fileDescriptor_types_7e896a7c04915591, []int{30}
}
func (m *Header) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2811,7 +2811,7 @@ func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{31}
return fileDescriptor_types_7e896a7c04915591, []int{31}
}
func (m *Version) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2866,7 +2866,7 @@ func (m *BlockID) Reset() { *m = BlockID{} }
func (m *BlockID) String() string { return proto.CompactTextString(m) }
func (*BlockID) ProtoMessage() {}
func (*BlockID) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{32}
return fileDescriptor_types_7e896a7c04915591, []int{32}
}
func (m *BlockID) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2921,7 +2921,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} }
func (m *PartSetHeader) String() string { return proto.CompactTextString(m) }
func (*PartSetHeader) ProtoMessage() {}
func (*PartSetHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{33}
return fileDescriptor_types_7e896a7c04915591, []int{33}
}
func (m *PartSetHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -2978,7 +2978,7 @@ func (m *Validator) Reset() { *m = Validator{} }
func (m *Validator) String() string { return proto.CompactTextString(m) }
func (*Validator) ProtoMessage() {}
func (*Validator) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{34}
return fileDescriptor_types_7e896a7c04915591, []int{34}
}
func (m *Validator) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -3034,7 +3034,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} }
func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) }
func (*ValidatorUpdate) ProtoMessage() {}
func (*ValidatorUpdate) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{35}
return fileDescriptor_types_7e896a7c04915591, []int{35}
}
func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -3090,7 +3090,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} }
func (m *VoteInfo) String() string { return proto.CompactTextString(m) }
func (*VoteInfo) ProtoMessage() {}
func (*VoteInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{36}
return fileDescriptor_types_7e896a7c04915591, []int{36}
}
func (m *VoteInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -3145,7 +3145,7 @@ func (m *PubKey) Reset() { *m = PubKey{} }
func (m *PubKey) String() string { return proto.CompactTextString(m) }
func (*PubKey) ProtoMessage() {}
func (*PubKey) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{37}
return fileDescriptor_types_7e896a7c04915591, []int{37}
}
func (m *PubKey) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -3203,7 +3203,7 @@ func (m *Evidence) Reset() { *m = Evidence{} }
func (m *Evidence) String() string { return proto.CompactTextString(m) }
func (*Evidence) ProtoMessage() {}
func (*Evidence) Descriptor() ([]byte, []int) {
return fileDescriptor_types_a177e47fab90f91d, []int{38}
return fileDescriptor_types_7e896a7c04915591, []int{38}
}
func (m *Evidence) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -15357,12 +15357,12 @@ var (
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) }
func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_7e896a7c04915591) }
func init() {
golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d)
golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_7e896a7c04915591)
}
var fileDescriptor_types_a177e47fab90f91d = []byte{
var fileDescriptor_types_7e896a7c04915591 = []byte{
// 2203 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x73, 0x1c, 0x47,
0xf5, 0xd7, 0xec, 0xef, 0x79, 0xab, 0xfd, 0xe1, 0xb6, 0x6c, 0xaf, 0xf7, 0x9b, 0xaf, 0xe4, 0x1a,


abci/types/types.proto (+1 -1)

@ -212,7 +212,7 @@ message ConsensusParams {
ValidatorParams validator = 3;
}
// BlockParams contains limits on the block size and timestamp.
// BlockParams contains limits on the block size.
message BlockParams {
// Note: must be greater than 0
int64 max_bytes = 1;


blockchain/reactor_test.go (+5 -2)

@ -13,6 +13,7 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@ -91,8 +92,10 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
// NOTE we have to create and commit the blocks first because
// pool.height is determined from the store.
fastSync := true
blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(),
sm.MockMempool{}, sm.MockEvidencePool{})
db := dbm.NewMemDB()
blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.MockEvidencePool{})
sm.SaveState(db, state)
// let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {


blockchain/store.go (+5 -5)

@ -144,14 +144,14 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
if block == nil {
cmn.PanicSanity("BlockStore can only save a non-nil block")
panic("BlockStore can only save a non-nil block")
}
height := block.Height
if g, w := height, bs.Height()+1; g != w {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
}
if !blockParts.IsComplete() {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
panic(fmt.Sprintf("BlockStore can only save complete block part sets"))
}
// Save block meta
@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}


cmd/tendermint/commands/reset_priv_validator.go (+11 -1)

@ -18,6 +18,12 @@ var ResetAllCmd = &cobra.Command{
Run: resetAll,
}
var keepAddrBook bool
func init() {
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact")
}
// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",
@ -41,7 +47,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
// ResetAll removes address book files plus all data, and resets the privValidator data.
// Exported so other CLI tools can use it.
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
removeAddrBook(addrBookFile, logger)
if keepAddrBook {
logger.Info("The address book remains intact")
} else {
removeAddrBook(addrBookFile, logger)
}
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
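
The exported `ResetAll` helper shown above can be called from other Go tools. A hedged sketch follows, assuming the standard config accessors (`DBDir`, `P2P.AddrBookFile`, `PrivValidatorKeyFile`, `PrivValidatorStateFile`); note that the new `--keep-addr-book` behaviour is driven by the command's package-level flag, so a direct call still removes the address book by default.

```go
package main

import (
	"os"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	conf := cfg.DefaultConfig()
	conf.SetRoot(os.ExpandEnv("$HOME/.tendermint"))
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Wipes blockchain data and the priv validator state, as `tendermint unsafe_reset_all` does.
	commands.ResetAll(conf.DBDir(), conf.P2P.AddrBookFile(),
		conf.PrivValidatorKeyFile(), conf.PrivValidatorStateFile(), logger)
}
```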


cmd/tendermint/commands/testnet.go (+70 -15)

@ -8,6 +8,7 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
@ -20,13 +21,17 @@ import (
var (
nValidators int
nNonValidators int
configFile string
outputDir string
nodeDirPrefix string
populatePersistentPeers bool
hostnamePrefix string
hostnameSuffix string
startingIPAddress string
hostnames []string
p2pPort int
randomMonikers bool
)
const (
@ -36,6 +41,8 @@ const (
func init() {
TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
"Number of validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&configFile, "config", "",
"Config file to use (note some options may be overwritten)")
TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
"Number of non-validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
@ -46,11 +53,17 @@ func init() {
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
"Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
"Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "",
"Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
"Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{},
"Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)")
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656,
"P2P Port")
TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
"Randomize the moniker for each generated node")
}
// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
@ -72,7 +85,29 @@ Example:
}
func testnetFiles(cmd *cobra.Command, args []string) error {
if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) {
return fmt.Errorf(
"testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used",
nValidators+nNonValidators,
)
}
config := cfg.DefaultConfig()
// overwrite default config if set and valid
if configFile != "" {
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
if err := viper.Unmarshal(config); err != nil {
return err
}
if err := config.ValidateBasic(); err != nil {
return err
}
}
genVals := make([]types.GenesisValidator, nValidators)
for i := 0; i < nValidators; i++ {
@ -162,6 +197,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
if populatePersistentPeers {
config.P2P.PersistentPeers = persistentPeers
}
config.Moniker = moniker(i)
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
}
@ -171,21 +207,23 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}
func hostnameOrIP(i int) string {
if startingIPAddress != "" {
ip := net.ParseIP(startingIPAddress)
ip = ip.To4()
if ip == nil {
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
os.Exit(1)
}
for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
if len(hostnames) > 0 && i < len(hostnames) {
return hostnames[i]
}
if startingIPAddress == "" {
return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
}
ip := net.ParseIP(startingIPAddress)
ip = ip.To4()
if ip == nil {
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
os.Exit(1)
}
return fmt.Sprintf("%s%d", hostnamePrefix, i)
for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
}
func persistentPeersString(config *cfg.Config) (string, error) {
@ -201,3 +239,20 @@ func persistentPeersString(config *cfg.Config) (string, error) {
}
return strings.Join(persistentPeers, ","), nil
}
func moniker(i int) string {
if randomMonikers {
return randomMoniker()
}
if len(hostnames) > 0 && i < len(hostnames) {
return hostnames[i]
}
if startingIPAddress == "" {
return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
}
return randomMoniker()
}
func randomMoniker() string {
return cmn.HexBytes(cmn.RandBytes(8)).String()
}

config/config.go (+13 -2)

@ -153,7 +153,18 @@ type BaseConfig struct {
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`
// Database backend: leveldb | memdb | cleveldb
// Database backend: goleveldb | cleveldb | boltdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
// - pure go
// - stable
// * cleveldb (uses levigo wrapper)
// - fast
// - requires gcc
// - use cleveldb build tag (go build -tags cleveldb)
// * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
// - EXPERIMENTAL
// - may be faster in some use-cases (random reads - indexer)
// - use boltdb build tag (go build -tags boltdb)
DBBackend string `mapstructure:"db_backend"`
// Database directory
@ -207,7 +218,7 @@ func DefaultBaseConfig() BaseConfig {
ProfListenAddress: "",
FastSync: true,
FilterPeers: false,
DBBackend: "leveldb",
DBBackend: "goleveldb",
DBPath: "data",
}
}
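
A short sketch of what the documented backend choice looks like from Go, assuming the libs/db constructor `dbm.NewDB(name, backend, dir)` and its backend constants (e.g. `GoLevelDBBackend`, `BoltDBBackend`); the cleveldb and boltdb backends additionally require building with the matching tag.

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
	// "goleveldb" is the new default db_backend; "cleveldb" and "boltdb"
	// are only available when the binary is built with the matching tag.
	db := dbm.NewDB("example", dbm.GoLevelDBBackend, "/tmp/tm-example")
	defer db.Close()

	db.Set([]byte("key"), []byte("value"))
	fmt.Printf("%s\n", db.Get([]byte("key")))
}
```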


config/toml.go (+15 -4)

@ -28,13 +28,13 @@ func init() {
// and panics if it fails.
func EnsureRoot(rootDir string) {
if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
panic(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
panic(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
cmn.PanicSanity(err.Error())
panic(err.Error())
}
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
@ -81,7 +81,18 @@ moniker = "{{ .BaseConfig.Moniker }}"
# and verifying their commits
fast_sync = {{ .BaseConfig.FastSync }}
# Database backend: leveldb | memdb | cleveldb
# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use-cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
db_backend = "{{ .BaseConfig.DBBackend }}"
# Database directory


consensus/byzantine_test.go (+4 -1)

@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@ -69,7 +70,7 @@ func TestByzantine(t *testing.T) {
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
@ -81,6 +82,7 @@ func TestByzantine(t *testing.T) {
}
reactors[i] = conRI
sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info
}
defer func() {
@ -268,3 +270,4 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

consensus/common_test.go (+42 -11)

@ -14,6 +14,8 @@ import (
"github.com/go-kit/kit/log/term"
"path"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
@ -119,6 +121,24 @@ func incrementRound(vss ...*validatorStub) {
}
}
type ValidatorStubsByAddress []*validatorStub
func (vss ValidatorStubsByAddress) Len() int {
return len(vss)
}
func (vss ValidatorStubsByAddress) Less(i, j int) bool {
return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1
}
func (vss ValidatorStubsByAddress) Swap(i, j int) {
it := vss[i]
vss[i] = vss[j]
vss[i].Index = i
vss[j] = it
vss[j].Index = j
}
//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
@ -228,7 +248,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
}
func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
}
@ -268,7 +288,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
@ -278,7 +298,8 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
evpool := sm.MockEvidencePool{}
// Make ConsensusState
stateDB := dbm.NewMemDB()
stateDB := blockDB
sm.SaveState(stateDB, state) //for save height 1's validators info
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
@ -351,7 +372,7 @@ func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
}
func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
ensureNoNewEvent(
stepCh,
timeoutDuration,
@ -398,7 +419,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
}
func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}
@ -564,7 +585,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
@ -576,12 +597,11 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
var peer0Config *cfg.Config
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
@ -589,6 +609,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
if i == 0 {
peer0Config = thisConfig
}
var privVal types.PrivValidator
if i < nValidators {
privVal = privVals[i]
@ -605,15 +628,19 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
}
app := appFunc()
app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
vals := types.TM2PB.ValidatorUpdates(state.Validators)
if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version. If don't do this, replay test will fail
}
app.InitChain(abci.RequestInitChain{Validators: vals})
//sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css, func() {
return css, genDoc, peer0Config, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
@ -719,3 +746,7 @@ func newPersistentKVStore() abci.Application {
}
return kvstore.NewPersistentKVStoreApplication(dir)
}
func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
}
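
The renamed constructor used above (`mempl.NewCListMempool` in place of `mempl.NewMempool`) wires up the same way as before. A minimal sketch with a hypothetical helper, assuming the constructor keeps the old signature (mempool config, proxy.AppConnMempool, starting height):

```go
package example

import (
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/proxy"
)

// newTestMempool mirrors the test setup above: build a CList-backed mempool
// over an ABCI mempool connection, starting at height 0.
func newTestMempool(conf *cfg.MempoolConfig, appConn proxy.AppConnMempool) mempl.Mempool {
	mp := mempl.NewCListMempool(conf, appConn, 0)
	mp.SetLogger(log.TestingLogger().With("module", "mempool"))
	mp.EnableTxsAvailable() // the tests gate this on the consensus config's WaitForTxs()
	return mp
}
```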

consensus/mempool_test.go (+10 -4)

@ -11,13 +11,15 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// for testing
func assertMempool(txn txNotifier) sm.Mempool {
return txn.(sm.Mempool)
func assertMempool(txn txNotifier) mempl.Mempool {
return txn.(mempl.Mempool)
}
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
@ -106,7 +108,9 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusState(state, privVals[0], NewCounterApplication())
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
sm.SaveState(blockDB, state)
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@ -129,7 +133,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
cs := newConsensusState(state, privVals[0], app)
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
sm.SaveState(blockDB, state)
// increment the counter by 1
txBytes := make([]byte, 8)


consensus/reactor.go (+16 -8)

@ -155,16 +155,24 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
}
}
// AddPeer implements Reactor
// InitPeer implements Reactor by creating a state for the peer.
func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
return peer
}
// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
if !conR.IsRunning() {
return
}
// Create peerState for peer
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("peer %v has no state", peer))
}
// Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState)
go conR.gossipVotesRoutine(peer, peerState)
@ -177,7 +185,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
}
}
// RemovePeer implements Reactor
// RemovePeer is a noop.
func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
if !conR.IsRunning() {
return
@ -491,7 +499,7 @@ OUTER_LOOP:
if prs.ProposalBlockParts == nil {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
panic(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
prs.Height, conR.conS.blockStore.Height()))
}
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
@ -1110,7 +1118,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
NOTE: This is wrong, 'round' could change.
e.g. if orig round is not the same as block LastCommit round.
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
panic(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
}
*/
if ps.PRS.CatchupCommitRound == round {


+51 -2 consensus/reactor_test.go

@ -23,6 +23,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/mock"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@ -51,6 +52,10 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
blocksSubs = append(blocksSubs, blocksSub)
if css[i].state.LastBlockHeight == 0 { //simulate handling InitChain during the handshake
sm.SaveState(css[i].blockExec.DB(), css[i].state)
}
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
@ -136,7 +141,7 @@ func TestReactorWithEvidence(t *testing.T) {
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
@ -239,6 +244,49 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
}, css)
}
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
var (
reactor = reactors[0]
peer = mock.NewPeer(nil)
msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
)
reactor.InitPeer(peer)
// simulate switch calling Receive before AddPeer
assert.NotPanics(t, func() {
reactor.Receive(StateChannel, peer, msg)
reactor.AddPeer(peer)
})
}
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
var (
reactor = reactors[0]
peer = mock.NewPeer(nil)
msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
)
// we should call InitPeer here
// simulate switch calling Receive before AddPeer
assert.Panics(t, func() {
reactor.Receive(StateChannel, peer, msg)
})
}
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
@ -329,7 +377,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
func TestReactorValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
defer cleanup()
logger := log.TestingLogger()
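The reactor.go diff above splits peer setup into two phases: InitPeer attaches a fresh PeerState to the peer before the switch starts it, and AddPeer only looks that state up and spawns the gossip goroutines. The two new tests here pin down the resulting contract: Receive may legitimately arrive before AddPeer, but never before InitPeer. Below is a condensed, hypothetical sketch of that lifecycle, assuming the consensus package context (PeerState, NewPeerState); startGossip is a stand-in for the real gossipData/gossipVotes routines, not a function in the codebase.

package consensus // illustrative sketch only; not part of this diff

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type lifecycleReactor struct {
	p2p.BaseReactor
}

// InitPeer runs before the peer is started and only attaches state.
func (r *lifecycleReactor) InitPeer(peer p2p.Peer) p2p.Peer {
	peer.Set(types.PeerStateKey, NewPeerState(peer).SetLogger(r.Logger))
	return peer
}

// AddPeer runs after the peer is started and assumes InitPeer already ran.
func (r *lifecycleReactor) AddPeer(peer p2p.Peer) {
	ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	go r.startGossip(peer, ps) // hypothetical stand-in for the gossip routines
}

// Receive can fire before AddPeer (the switch may deliver messages as soon
// as the peer is started), so it must only rely on what InitPeer set up.
func (r *lifecycleReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	_ = ps // handle msgBytes using ps
}

func (r *lifecycleReactor) startGossip(peer p2p.Peer, ps *PeerState) {}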


+51 -21 consensus/replay.go

@ -13,10 +13,10 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
//auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -230,6 +230,7 @@ func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
h.eventBus = eventBus
}
// NBlocks returns the number of blocks applied to the state.
func (h *Handshaker) NBlocks() int {
return h.nBlocks
}
@ -257,13 +258,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
)
// Set AppVersion on the state.
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
sm.SaveState(h.stateDB, h.initialState)
if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) {
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
sm.SaveState(h.stateDB, h.initialState)
}
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
if err != nil {
return fmt.Errorf("Error on replay: %v", err)
return fmt.Errorf("error on replay: %v", err)
}
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
@ -274,7 +277,8 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
return nil
}
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// ReplayBlocks replays all blocks since appBlockHeight and ensures the result
// matches the current state.
// Returns the final AppHash or an error.
func (h *Handshaker) ReplayBlocks(
state sm.State,
@ -319,7 +323,7 @@ func (h *Handshaker) ReplayBlocks(
} else {
// If validator set is not set in genesis and still empty after InitChain, exit.
if len(h.genDoc.Validators) == 0 {
return nil, fmt.Errorf("Validator set is nil in genesis and still empty after InitChain")
return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
}
}
@ -332,7 +336,8 @@ func (h *Handshaker) ReplayBlocks(
// First handle edge cases and constraints on the storeBlockHeight.
if storeBlockHeight == 0 {
return appHash, checkAppHash(state, appHash)
assertAppHashEqualsOneFromState(appHash, state)
return appHash, nil
} else if storeBlockHeight < appBlockHeight {
// the app should never be ahead of the store (but this is under app's control)
@ -340,11 +345,11 @@ func (h *Handshaker) ReplayBlocks(
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
var err error
@ -359,7 +364,8 @@ func (h *Handshaker) ReplayBlocks(
} else if appBlockHeight == storeBlockHeight {
// We're good!
return appHash, checkAppHash(state, appHash)
assertAppHashEqualsOneFromState(appHash, state)
return appHash, nil
}
} else if storeBlockHeight == stateBlockHeight+1 {
@ -380,7 +386,7 @@ func (h *Handshaker) ReplayBlocks(
return state.AppHash, err
} else if appBlockHeight == storeBlockHeight {
// We ran Commit, but didn't save the state, so replayBlock with mock app
// We ran Commit, but didn't save the state, so replayBlock with mock app.
abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight)
if err != nil {
return nil, err
@ -393,8 +399,8 @@ func (h *Handshaker) ReplayBlocks(
}
cmn.PanicSanity("Should never happen")
return nil, nil
panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d",
appBlockHeight, storeBlockHeight, stateBlockHeight))
}
func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) {
@ -417,7 +423,12 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
for i := appBlockHeight + 1; i <= finalBlock; i++ {
h.logger.Info("Applying block", "height", i)
block := h.store.LoadBlock(i)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB)
// Extra check to ensure the app was not changed in a way it shouldn't have.
if len(appHash) > 0 {
assertAppHashEqualsOneFromBlock(appHash, block)
}
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB)
if err != nil {
return nil, err
}
@ -434,7 +445,8 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
appHash = state.AppHash
}
return appHash, checkAppHash(state, appHash)
assertAppHashEqualsOneFromState(appHash, state)
return appHash, nil
}
// ApplyBlock on the proxyApp with the last block.
@ -442,7 +454,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
block := h.store.LoadBlock(height)
meta := h.store.LoadBlockMeta(height)
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, mock.Mempool{}, sm.MockEvidencePool{})
blockExec.SetEventBus(h.eventBus)
var err error
@ -456,11 +468,26 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
return state, nil
}
func checkAppHash(state sm.State, appHash []byte) error {
if !bytes.Equal(state.AppHash, appHash) {
panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, state.AppHash).Error())
func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) {
if !bytes.Equal(appHash, block.AppHash) {
panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X.
Block: %v
`,
appHash, block.AppHash, block))
}
}
func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) {
if !bytes.Equal(appHash, state.AppHash) {
panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got
%X, expected %X.
State: %v
Did you reset Tendermint without resetting your application's data?`,
appHash, state.AppHash, state))
}
return nil
}
//--------------------------------------------------------------------------------
@ -491,6 +518,9 @@ type mockProxyApp struct {
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTx[mock.txCount]
mock.txCount++
if r == nil { // r can be nil because amino unmarshalling turns an empty ResponseDeliverTx into nil
return abci.ResponseDeliverTx{}
}
return *r
}
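The ReplayBlocks hunks above keep the same three-way height comparison between the app, the block store and the tendermint state, but swap the error-returning checkAppHash for hard assertions. As a reading aid, here is a simplified, hypothetical helper that only labels each case; the branches not visible in the hunks above are summarized from the surrounding method, so verify them against the full file.

// replayDecision is NOT part of the codebase: it condenses the ReplayBlocks
// case analysis into labels instead of performing the replay.
func replayDecision(appHeight, storeHeight, stateHeight int64) string {
	switch {
	case storeHeight == 0:
		return "nothing stored: assert appHash equals state.AppHash and return"
	case storeHeight < appHeight:
		return "app ahead of block store: return an error (this is under the app's control)"
	case storeHeight < stateHeight:
		return "state ahead of block store: panic (tendermint invariant broken)"
	case storeHeight > stateHeight+1:
		return "block store more than one block ahead of state: panic"
	case storeHeight == stateHeight:
		if appHeight < storeHeight {
			return "replay blocks appHeight+1..storeHeight against the app"
		}
		return "all synced up: assert appHash equals state.AppHash"
	case storeHeight == stateHeight+1:
		switch {
		case appHeight < stateHeight:
			return "replay up to storeHeight, letting the final block update the tendermint state"
		case appHeight == stateHeight:
			return "app has not committed the last stored block: replay it with the real app and save the state"
		default: // appHeight == storeHeight
			return "app committed but tendermint state was not saved: replay the last block against a mock app built from the saved ABCI responses"
		}
	default:
		return "uncovered case: panic"
	}
}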


+2 -1 consensus/replay_file.go

@ -16,6 +16,7 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -312,7 +313,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
}
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{}
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,


+482 -66 consensus/replay_test.go

@ -7,7 +7,7 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@ -15,16 +15,21 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"sort"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
)
@ -88,7 +93,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
}
}
func sendTxs(cs *ConsensusState, ctx context.Context) {
func sendTxs(ctx context.Context, cs *ConsensusState) {
for i := 0; i < 256; i++ {
select {
case <-ctx.Done():
@ -113,7 +118,7 @@ func TestWALCrash(t *testing.T) {
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
go sendTxs(cs, ctx)
go sendTxs(ctx, cs)
},
3},
}
@ -138,10 +143,10 @@ LOOP:
// create consensus state from a clean slate
logger := log.NewNopLogger()
stateDB := dbm.NewMemDB()
blockDB := dbm.NewMemDB()
stateDB := blockDB
state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
cs.SetLogger(logger)
@ -260,15 +265,23 @@ func (w *crashingWAL) Stop() error { return w.next.Stop() }
func (w *crashingWAL) Wait() { w.next.Wait() }
//------------------------------------------------------------------------------------------
// Handshake Tests
type testSim struct {
GenesisState sm.State
Config *cfg.Config
Chain []*types.Block
Commits []*types.Commit
CleanupFunc cleanupFunc
}
const (
NUM_BLOCKS = 6
numBlocks = 6
)
var (
mempool = sm.MockMempool{}
mempool = mock.Mempool{}
evpool = sm.MockEvidencePool{}
sim testSim
)
//---------------------------------------
@ -279,93 +292,356 @@ var (
// 2 - save block and committed but state is behind
var modes = []uint{0, 1, 2}
// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
func TestSimulateValidatorsChange(t *testing.T) {
nPeers := 7
nVals := 4
css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
sim.Config = config
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
sim.CleanupFunc = cleanup
partSize := types.BlockPartSizeBytes
newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)
vss := make([]*validatorStub, nPeers)
for i := 0; i < nPeers; i++ {
vss[i] = NewValidatorStub(css[i].privValidator, i)
}
height, round := css[0].Height, css[0].Round
// start the machine
startTestRound(css[0], height, round)
incrementHeight(vss...)
ensureNewRound(newRoundCh, height, 0)
ensureNewProposal(proposalCh, height, round)
rs := css[0].GetRoundState()
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
//height 2
height++
incrementHeight(vss...)
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil)
assert.Nil(t, err)
propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
//height 3
height++
incrementHeight(vss...)
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil)
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
//height 4
height++
incrementHeight(vss...)
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil)
assert.Nil(t, err)
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil)
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
newVss := make([]*validatorStub, nVals+1)
copy(newVss, vss[:nVals+1])
sort.Sort(ValidatorStubsByAddress(newVss))
selfIndex := 0
for i, vs := range newVss {
if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
selfIndex = i
break
}
}
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil)
assert.Nil(t, err)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
//height 5
height++
incrementHeight(vss...)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
//height 6
height++
incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil)
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
newVss = make([]*validatorStub, nVals+3)
copy(newVss, vss[:nVals+3])
sort.Sort(ValidatorStubsByAddress(newVss))
for i, vs := range newVss {
if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
selfIndex = i
break
}
}
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+3; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
sim.Chain = make([]*types.Block, 0)
sim.Commits = make([]*types.Commit, 0)
for i := 1; i <= numBlocks; i++ {
sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
}
}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 0, m)
for _, m := range modes {
testHandshakeReplay(t, config, 0, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, 0, m, true)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, 1, m)
for _, m := range modes {
testHandshakeReplay(t, config, 1, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, 1, m, true)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks-1, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks-1, m, true)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for i, m := range modes {
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
defer os.RemoveAll(config.RootDir)
testHandshakeReplay(t, config, NUM_BLOCKS, m)
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks, m, true)
}
}
// Test that mockProxyApp does not panic when the app returns ABCIResponses containing an empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
sim.CleanupFunc() //clean the test env created in TestSimulateValidatorsChange
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
txIndex := 0
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(sm.ABCIResponses)
abciResWithEmptyDeliverTx.DeliverTx = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.DeliverTx = append(abciResWithEmptyDeliverTx.DeliverTx, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx)
loadedAbciRes := new(sm.ABCIResponses)
// this also happens in sm.LoadABCIResponses
err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes)
require.NoError(t, err)
mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
abciRes := new(sm.ABCIResponses)
abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx))
// Execute transactions and get hash.
proxyCb := func(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_DeliverTx:
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := r.DeliverTx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
}
abciRes.DeliverTx[txIndex] = txRes
txIndex++
}
}
mock.SetResponseCallback(proxyCb)
someTx := []byte("tx")
mock.DeliverTxAsync(someTx)
})
assert.True(t, validTxs == 1)
assert.True(t, invalidTxs == 0)
}
func tempWALWithData(data []byte) string {
walFile, err := ioutil.TempFile("", "wal")
if err != nil {
panic(fmt.Errorf("failed to create temp WAL file: %v", err))
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
}
_, err = walFile.Write(data)
if err != nil {
panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Errorf("failed to close temp WAL file: %v", err))
panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
}
return walFile.Name()
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
var store *mockBlockStore
var stateDB dbm.DB
var genisisState sm.State
if testValidatorsChange {
testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
stateDB = dbm.NewMemDB()
genisisState = sim.GenesisState
config = sim.Config
chain = sim.Chain
commits = sim.Commits
store = newMockBlockStore(config, genisisState.ConsensusParams)
} else { //test single node
testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
wal, err := NewWAL(walFile)
require.NoError(t, err)
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
defer wal.Stop()
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
chain, commits, err := makeBlockchainFromWAL(wal)
require.NoError(t, err)
wal, err := NewWAL(walFile)
require.NoError(t, err)
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
defer wal.Stop()
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
chain, commits, err = makeBlockchainFromWAL(wal)
require.NoError(t, err)
stateDB, genisisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
}
store.chain = chain
store.commits = commits
state := genisisState.Copy()
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(config, stateDB, state, chain, mode)
state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode)
latestAppHash := state.AppHash
// make a new client creator
kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2"))
kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2)
stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode)
stateDB1 := dbm.NewMemDB()
sm.SaveState(stateDB1, genisisState)
buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode)
}
// now start the app using the handshake - it should sync
@ -391,8 +667,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
}
expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync := numBlocks - nBlocks
if nBlocks == numBlocks && mode > 0 {
expectedBlocksToSync++
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync++
@ -407,7 +683,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()}
blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()}
newState, err := blockExec.ApplyBlock(st, blkID, blk)
if err != nil {
panic(err)
@ -423,12 +699,14 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
defer proxyApp.Stop()
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
panic(err)
}
sm.SaveState(stateDB, state) //save height 1's validatorsInfo
switch mode {
case 0:
@ -451,21 +729,23 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State {
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1")))
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
proxyApp := proxy.NewAppConns(clientCreator)
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
panic(err)
}
sm.SaveState(stateDB, state) //save height 1's validatorsInfo
switch mode {
case 0:
@ -489,28 +769,162 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
return state
}
func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
// - 0x01
// - 0x02
// - 0x03
config := ResetConfig("handshake_test_")
defer os.RemoveAll(config.RootDir)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
const appVersion = 0x0
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion)
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
blocks := makeBlocks(3, &state, privVal)
store.chain = blocks
// 2. Tendermint must panic if app returns wrong hash for the first block
// - RANDOM HASH
// - 0x02
// - 0x03
{
app := &badApp{numBlocks: 3, allHashesAreWrong: true}
clientCreator := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(clientCreator)
err := proxyApp.Start()
require.NoError(t, err)
defer proxyApp.Stop()
assert.Panics(t, func() {
h := NewHandshaker(stateDB, state, store, genDoc)
h.Handshake(proxyApp)
})
}
// 3. Tendermint must panic if app returns wrong hash for the last block
// - 0x01
// - 0x02
// - RANDOM HASH
{
app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
clientCreator := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(clientCreator)
err := proxyApp.Start()
require.NoError(t, err)
defer proxyApp.Stop()
assert.Panics(t, func() {
h := NewHandshaker(stateDB, state, store, genDoc)
h.Handshake(proxyApp)
})
}
}
func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
blocks := make([]*types.Block, 0)
var (
prevBlock *types.Block
prevBlockMeta *types.BlockMeta
)
appHeight := byte(0x01)
for i := 0; i < n; i++ {
height := int64(i + 1)
block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
blocks = append(blocks, block)
prevBlock = block
prevBlockMeta = types.NewBlockMeta(block, parts)
// update state
state.AppHash = []byte{appHeight}
appHeight++
state.LastBlockHeight = height
}
return blocks
}
func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
addr := privVal.GetPubKey().Address()
idx, _ := valset.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: idx,
Height: header.Height,
Round: 1,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: blockID,
}
privVal.SignVote(header.ChainID, vote)
return vote
}
func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
lastCommit := types.NewCommit(types.BlockID{}, nil)
if height > 1 {
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVal).CommitSig()
lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
}
return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
}
type badApp struct {
abci.BaseApplication
numBlocks byte
height byte
allHashesAreWrong bool
onlyLastHashIsWrong bool
}
func (app *badApp) Commit() abci.ResponseCommit {
app.height++
if app.onlyLastHashIsWrong {
if app.height == app.numBlocks {
return abci.ResponseCommit{Data: cmn.RandBytes(8)}
}
return abci.ResponseCommit{Data: []byte{app.height}}
} else if app.allHashesAreWrong {
return abci.ResponseCommit{Data: cmn.RandBytes(8)}
}
panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
}
//--------------------------
// utils for making blocks
func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
var height int64
// Search for height marker
gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{})
gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1)
return nil, nil, fmt.Errorf("WAL does not contain height %d", height)
}
defer gr.Close() // nolint: errcheck
// log.Notice("Build a blockchain by reading from the WAL")
var blocks []*types.Block
var commits []*types.Commit
var thisBlockParts *types.PartSet
var thisBlockCommit *types.Commit
var height int64
var (
blocks []*types.Block
commits []*types.Commit
thisBlockParts *types.PartSet
thisBlockCommit *types.Commit
)
dec := NewWALDecoder(gr)
for {
@ -602,7 +1016,8 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
store := NewMockBlockStore(config, state.ConsensusParams)
store := newMockBlockStore(config, state.ConsensusParams)
sm.SaveState(stateDB, state)
return stateDB, state, store
}
@ -617,7 +1032,7 @@ type mockBlockStore struct {
}
// TODO: NewBlockStore(db.NewMemDB) ...
func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
return &mockBlockStore{config, params, nil, nil}
}
@ -626,7 +1041,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()},
BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}
@ -640,15 +1055,16 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
//----------------------------------------
//---------------------------------------
// Test handshake/init chain
func TestInitChainUpdateValidators(t *testing.T) {
func TestHandshakeUpdatesValidators(t *testing.T) {
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := proxy.NewLocalClientCreator(app)
config := ResetConfig("proxy_test_")
config := ResetConfig("handshake_test_")
defer os.RemoveAll(config.RootDir)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
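The handshake tests distinguish three termination modes for the final block (see the comment above the modes variable near the top of this file's diff: 0 - all synced up, 1 - saved block but app and state are behind, 2 - saved block and committed but state is behind). The small hypothetical summary below spells out what each mode leaves persisted, following that comment and the partially visible switch bodies; it is a reading aid, not code from the repository.

// replayTestMode is a hypothetical summary of the test modes used by
// testHandshakeReplay; the real tests just pass a bare uint.
type replayTestMode struct {
	Mode         uint
	BlockSaved   bool // final block written to the block store
	AppCommitted bool // app has committed the final block
	StateSaved   bool // tendermint state saved for the final block
}

var replayTestModes = []replayTestMode{
	{Mode: 0, BlockSaved: true, AppCommitted: true, StateSaved: true},   // all synced up
	{Mode: 1, BlockSaved: true, AppCommitted: false, StateSaved: false}, // app and state behind
	{Mode: 2, BlockSaved: true, AppCommitted: true, StateSaved: false},  // only state behind
}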


+15 -24 consensus/state.go

@ -484,18 +484,9 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
return
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators)
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
}
added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit))
if !added || err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
}
}
lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators)
if !lastPrecommits.HasTwoThirdsMajority() {
cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj")
panic("Failed to reconstruct LastCommit: Does not have +2/3 maj")
}
cs.LastCommit = lastPrecommits
}
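The manual loop over seenCommit.Precommits is folded here into types.CommitToVoteSet. Presumably the helper encapsulates exactly the logic removed above; the sketch below reconstructs it under that assumption (it assumes the fmt and types imports and is not the real implementation, which lives in the types package).

// commitToVoteSet sketches what the new helper is assumed to do: rebuild a
// precommit VoteSet from a stored Commit, mirroring the loop removed above.
func commitToVoteSet(chainID string, commit *types.Commit, vals *types.ValidatorSet) *types.VoteSet {
	voteSet := types.NewVoteSet(chainID, commit.Height(), commit.Round(), types.PrecommitType, vals)
	for _, precommit := range commit.Precommits {
		if precommit == nil {
			continue // absent vote
		}
		added, err := voteSet.AddVote(commit.ToVote(precommit))
		if !added || err != nil {
			panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
		}
	}
	return voteSet
}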
@ -504,13 +495,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state sm.State) {
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v",
panic(fmt.Sprintf("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight))
}
if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height {
// This might happen when someone else is mutating cs.state.
// Someone forgot to pass in state.Copy() somewhere?!
cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cs.state.LastBlockHeight+1, cs.Height))
}
@ -530,7 +521,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
lastPrecommits := (*types.VoteSet)(nil)
if cs.CommitRound > -1 && cs.Votes != nil {
if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
panic("updateToState(state) called but last Precommit round didn't have +2/3")
}
lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
}
@ -1047,7 +1038,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
@ -1103,7 +1094,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// the latest POLRound should be this round.
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
@ -1137,7 +1128,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
}
cs.LockedRound = round
cs.LockedBlock = cs.ProposalBlock
@ -1175,7 +1166,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
@ -1214,7 +1205,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
if !ok {
cmn.PanicSanity("RunActionCommit() expects +2/3 precommits")
panic("RunActionCommit() expects +2/3 precommits")
}
// The Locked* fields no longer matter.
@ -1247,7 +1238,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)
if cs.Height != height {
cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
@ -1277,16 +1268,16 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
if !ok {
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
panic(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
}
if !blockParts.HasHeader(blockID.PartsHeader) {
cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
panic(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
}
if !block.HashesTo(blockID.Hash) {
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
panic(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
}
if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
panic(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
}
cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs),


+38 -33 consensus/state_test.go

@ -239,7 +239,7 @@ func TestStateFullRound1(t *testing.T) {
cs.SetEventBus(eventBus)
eventBus.Start()
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
@ -267,7 +267,7 @@ func TestStateFullRoundNil(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
cs.enterPrevote(height, round)
cs.startRoutines(4)
@ -286,7 +286,7 @@ func TestStateFullRound2(t *testing.T) {
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
// start round and wait for propose and prevote
@ -330,7 +330,7 @@ func TestStateLockNoPOL(t *testing.T) {
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@ -370,7 +370,7 @@ func TestStateLockNoPOL(t *testing.T) {
// (note we're entering precommit for a second time this round)
// but with invalid args. then we enterPrecommitWait, and the timeout to new round
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
///
@ -384,7 +384,7 @@ func TestStateLockNoPOL(t *testing.T) {
incrementRound(vs2)
// now we're on a new round and not the proposer, so wait for timeout
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
rs := cs1.GetRoundState()
@ -403,7 +403,7 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're going to enter prevote again, but with invalid args
// and then prevote wait, which should timeout. then wait for precommit
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
ensurePrecommit(voteCh, height, round) // precommit
// the proposed block should still be locked and our precommit added
@ -416,7 +416,7 @@ func TestStateLockNoPOL(t *testing.T) {
// (note we're entering precommit for a second time this round, but with invalid args
// then we enterPrecommitWait and timeout into NewRound
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // entering new round
ensureNewRound(newRoundCh, height, round)
@ -441,7 +441,7 @@ func TestStateLockNoPOL(t *testing.T) {
signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
ensurePrevote(voteCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
ensurePrecommit(voteCh, height, round) // precommit
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
@ -449,7 +449,7 @@ func TestStateLockNoPOL(t *testing.T) {
signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
ensurePrecommit(voteCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
cs2, _ := randConsensusState(2) // needed so generated block is different than locked block
// before we time out into new round, set next proposal block
@ -482,7 +482,7 @@ func TestStateLockNoPOL(t *testing.T) {
signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
ensurePrevote(voteCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
ensurePrecommit(voteCh, height, round)
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
@ -542,7 +542,7 @@ func TestStateLockPOLRelock(t *testing.T) {
incrementRound(vs2, vs3, vs4)
// timeout to new round
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // moving to the next round
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
@ -632,7 +632,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
propBlockParts := propBlock.MakePartSet(partSize)
// timeout to new round
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
rs = cs1.GetRoundState()
lockedBlockHash := rs.LockedBlock.Hash()
@ -710,7 +710,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
// cs1 precommit nil
ensurePrecommit(voteCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
t.Log("### ONTO ROUND 1")
@ -754,7 +754,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
incrementRound(vs2, vs3, vs4)
round = round + 1 // moving to the next round
@ -767,7 +767,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
*/
// timeout of propose
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
// finish prevote
ensurePrevote(voteCh, height, round)
@ -850,7 +850,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
incrementRound(vs2, vs3, vs4)
// timeout of precommit wait to new round
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // moving to the next round
// in round 2 we see the polkad block from round 0
@ -919,7 +919,7 @@ func TestProposeValidBlock(t *testing.T) {
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
incrementRound(vs2, vs3, vs4)
round = round + 1 // moving to the next round
@ -929,7 +929,7 @@ func TestProposeValidBlock(t *testing.T) {
t.Log("### ONTO ROUND 2")
// timeout of propose
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash)
@ -952,7 +952,7 @@ func TestProposeValidBlock(t *testing.T) {
ensureNewRound(newRoundCh, height, round)
t.Log("### ONTO ROUND 3")
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // moving to the next round
@ -1004,7 +1004,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
// vs3 send prevote nil
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
ensurePrecommit(voteCh, height, round)
// we should have precommitted
@ -1052,7 +1052,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
startTestRound(cs1, cs1.Height, round)
ensureNewRound(newRoundCh, height, round)
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
@ -1065,7 +1065,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
ensureNewValidBlock(validBlockCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
ensurePrecommit(voteCh, height, round)
validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
@ -1099,7 +1099,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
ensureNewRound(newRoundCh, height, round+1)
}
@ -1131,7 +1131,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
rs := cs1.GetRoundState()
assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
@ -1165,7 +1165,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
ensurePrecommit(voteCh, height, round)
validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // moving to the next round
ensureNewRound(newRoundCh, height, round)
@ -1191,7 +1191,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
incrementRound(vss[1:]...)
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
@ -1332,7 +1332,7 @@ func TestStartNextHeightCorrectly(t *testing.T) {
cs1.txNotifier.(*fakeTxNotifier).Notify()
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds())
rs = cs1.GetRoundState()
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
}
@ -1375,12 +1375,8 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
// add precommits
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
time.Sleep(5 * time.Millisecond)
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
rs = cs1.GetRoundState()
assert.True(t, rs.TriggeredTimeoutPrecommit)
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
@ -1519,7 +1515,7 @@ func TestStateHalt1(t *testing.T) {
incrementRound(vs2, vs3, vs4)
// timeout to new round
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
round = round + 1 // moving to the next round
@ -1627,3 +1623,12 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa
}
return sub.Out()
}
// subscribeUnBuffered subscribes the test client to the given query and returns an unbuffered channel (cap = 0).
func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
}
return sub.Out()
}
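Throughout this file the fixed cs1.config.TimeoutPropose / TimeoutPrevote / TimeoutPrecommit durations are replaced by the per-round accessors Propose(round), Prevote(round) and Precommit(round). Presumably these scale the base timeout by a per-round delta so later rounds wait longer; here is a sketch of one such accessor under that assumption (the method itself is clearly called in the tests above, but its body below is the assumption to verify against config/config.go).

// Precommit sketches the assumed per-round accessor: the base
// TimeoutPrecommit grows by TimeoutPrecommitDelta for every extra round.
func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
	return time.Duration(
		cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
	) * time.Nanosecond
}

The test assertions then read, e.g., ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()), so a timeout in a later round is given proportionally more time.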

+3 -5 consensus/types/height_vote_set.go

@ -6,7 +6,6 @@ import (
"strings"
"sync"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@ -83,7 +82,7 @@ func (hvs *HeightVoteSet) SetRound(round int) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if hvs.round != 0 && (round < hvs.round+1) {
cmn.PanicSanity("SetRound() must increment hvs.round")
panic("SetRound() must increment hvs.round")
}
for r := hvs.round + 1; r <= round; r++ {
if _, ok := hvs.roundVoteSets[r]; ok {
@ -96,7 +95,7 @@ func (hvs *HeightVoteSet) SetRound(round int) {
func (hvs *HeightVoteSet) addRound(round int) {
if _, ok := hvs.roundVoteSets[round]; ok {
cmn.PanicSanity("addRound() for an existing round")
panic("addRound() for an existing round")
}
// log.Debug("addRound(round)", "round", round)
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet)
@ -169,8 +168,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type
case types.PrecommitType:
return rvs.Precommits
default:
cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_))
return nil
panic(fmt.Sprintf("Unexpected vote type %X", type_))
}
}


+ 5
- 2
consensus/wal_generator.go View File

@ -10,12 +10,14 @@ import (
"time"
"github.com/pkg/errors"
"github.com/tendermint/tendermint/abci/example/kvstore"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@ -45,13 +47,14 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
if err != nil {
return errors.Wrap(err, "failed to read genesis file")
}
stateDB := db.NewMemDB()
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return errors.Wrap(err, "failed to make genesis state")
}
state.Version.Consensus.App = kvstore.ProtocolVersion
sm.SaveState(stateDB, state)
blockStore := bc.NewBlockStore(blockStoreDB)
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app))
proxyApp.SetLogger(logger.With("module", "proxy"))
@ -66,7 +69,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
return errors.Wrap(err, "failed to start event bus")
}
defer eventBus.Stop()
mempool := sm.MockMempool{}
mempool := mock.Mempool{}
evpool := sm.MockEvidencePool{}
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)


+ 71
- 0
crypto/merkle/simple_tree.go View File

@ -20,6 +20,77 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
}
}
// SimpleHashFromByteSliceIterative is an iterative alternative to
// SimpleHashFromByteSlice motivated by potential performance improvements.
// (#2611) had suggested that an iterative version of
// SimpleHashFromByteSlice would be faster, presumably because
// we can envision some overhead accumulating from stack
// frames and function calls. Additionally, a recursive algorithm risks
// hitting the stack limit and causing a stack overflow should the tree
// be too large.
//
// Provided here is an iterative alternative, a simple test to assert
// correctness and a benchmark. On the performance side, there appears to
// be no overall difference:
//
// BenchmarkSimpleHashAlternatives/recursive-4 20000 77677 ns/op
// BenchmarkSimpleHashAlternatives/iterative-4 20000 76802 ns/op
//
// On the surface it might seem that the additional overhead is due to
// the different allocation patterns of the implementations. The recursive
// version uses a single [][]byte slice which it then re-slices at each level of the tree.
// The iterative version allocates a [][]byte once within the function and
// then overwrites entries of that slice at each level of the tree.
//
// Experimenting by modifying the code to simply calculate the
// hash and not store the result shows little to no difference in performance.
//
// These preliminary results suggest:
//
// 1. The performance of the SimpleHashFromByteSlice is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the SimpleHashFromByteSlice routine is dominated
// by the actual hashing of data
//
// Although this work is in no way exhaustive, point #3 suggests that
// optimization of this routine would need to take an alternative
// approach to make significant improvements on the current performance.
//
// Finally, considering that the recursive implementation is easier to
// read, it might not be worthwhile to switch to a less intuitive
// implementation for so little benefit.
func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
items := make([][]byte, len(input))
for i, leaf := range input {
items[i] = leafHash(leaf)
}
size := len(items)
for {
switch size {
case 0:
return nil
case 1:
return items[0]
default:
rp := 0 // read position
wp := 0 // write position
for rp < size {
if rp+1 < size {
items[wp] = innerHash(items[rp], items[rp+1])
rp += 2
} else {
items[wp] = items[rp]
rp += 1
}
wp += 1
}
size = wp
}
}
}
// SimpleHashFromMap computes a Merkle tree from sorted map.
// Like calling SimpleHashFromHashers with
// `item = []byte(Hash(key) | Hash(value))`,


+ 36
- 0
crypto/merkle/simple_tree_test.go View File

@ -70,6 +70,42 @@ func TestSimpleProof(t *testing.T) {
}
}
func TestSimpleHashAlternatives(t *testing.T) {
total := 100
items := make([][]byte, total)
for i := 0; i < total; i++ {
items[i] = testItem(cmn.RandBytes(tmhash.Size))
}
rootHash1 := SimpleHashFromByteSlicesIterative(items)
rootHash2 := SimpleHashFromByteSlices(items)
require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2)
}
func BenchmarkSimpleHashAlternatives(b *testing.B) {
total := 100
items := make([][]byte, total)
for i := 0; i < total; i++ {
items[i] = testItem(cmn.RandBytes(tmhash.Size))
}
b.ResetTimer()
b.Run("recursive", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = SimpleHashFromByteSlices(items)
}
})
b.Run("iterative", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = SimpleHashFromByteSlicesIterative(items)
}
})
}
func Test_getSplitPoint(t *testing.T) {
tests := []struct {
length int


+ 11
- 4
crypto/multisig/multisignature.go View File

@ -1,7 +1,8 @@
package multisig
import (
"errors"
"fmt"
"strings"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/multisig/bitarray"
@ -53,13 +54,19 @@ func (mSig *Multisignature) AddSignature(sig []byte, index int) {
mSig.Sigs[newSigIndex] = sig
}
// AddSignatureFromPubKey adds a signature to the multisig,
// at the index in keys corresponding to the provided pubkey.
// AddSignatureFromPubKey adds a signature to the multisig, at the index in
// keys corresponding to the provided pubkey.
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error {
index := getIndex(pubkey, keys)
if index == -1 {
return errors.New("provided key didn't exist in pubkeys")
keysStr := make([]string, len(keys))
for i, k := range keys {
keysStr[i] = fmt.Sprintf("%X", k.Bytes())
}
return fmt.Errorf("provided key %X doesn't exist in pubkeys: \n%s", pubkey.Bytes(), strings.Join(keysStr, "\n"))
}
mSig.AddSignature(sig, index)
return nil
}


+ 56
- 18
crypto/multisig/threshold_pubkey_test.go View File

@ -36,30 +36,68 @@ func TestThresholdMultisigValidCases(t *testing.T) {
for tcIndex, tc := range cases {
multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys)
multisignature := NewMultisig(len(tc.pubkeys))
for i := 0; i < tc.k-1; i++ {
signingIndex := tc.signingIndices[i]
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed when i < k, tc %d, i %d", tcIndex, i)
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, i+1, len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
)
require.False(
t,
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed when i < k, tc %d, i %d", tcIndex, i,
)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
)
require.Equal(
t,
i+1,
len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex,
)
}
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed with k - 1 sigs, tc %d", tcIndex)
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys)
require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig failed after k good signatures, tc %d", tcIndex)
require.False(
t,
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed with k - 1 sigs, tc %d", tcIndex,
)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys),
)
require.True(
t,
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig failed after k good signatures, tc %d", tcIndex,
)
for i := tc.k + 1; i < len(tc.signingIndices); i++ {
signingIndex := tc.signingIndices[i]
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, tc.passAfterKSignatures[i-tc.k-1],
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i)
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, i+1, len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
)
require.Equal(
t,
tc.passAfterKSignatures[i-tc.k-1],
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i,
)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
)
require.Equal(
t,
i+1,
len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex,
)
}
}
}


+ 2
- 3
crypto/xsalsa20symmetric/symmetric.go View File

@ -7,7 +7,6 @@ import (
"golang.org/x/crypto/nacl/secretbox"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
)
// TODO, make this into a struct that implements crypto.Symmetric.
@ -19,7 +18,7 @@ const secretLen = 32
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
if len(secret) != secretLen {
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
}
nonce := crypto.CRandBytes(nonceLen)
nonceArr := [nonceLen]byte{}
@ -36,7 +35,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) {
if len(secret) != secretLen {
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
}
if len(ciphertext) <= secretbox.Overhead+nonceLen {
return nil, errors.New("Ciphertext is too short")


+ 1
- 1
docs/app-dev/subscribing-to-events-via-websocket.md View File

@ -2,7 +2,7 @@
Tendermint emits different events, to which you can subscribe via
[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
for third-party applications (for analysys) or inspecting state.
for third-party applications (for analysis) or inspecting state.
[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)


+ 40
- 20
docs/architecture/adr-025-commit.md View File

@ -1,14 +1,18 @@
# ADR 025 Commit
## Context
Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data.
In particular it contains an array of every precommit from the validators, which includes many copies of the same data. Such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truely necessary data is the `Signature` and `Timestamp` associated with each `Vote`.
It contains a list of precommits from every validator, where the precommit
includes the whole `Vote` structure. Thus each of the commit height, round,
type, and blockID are repeated for every validator, and could be deduplicated.
```
type Commit struct {
BlockID BlockID `json:"block_id"`
Precommits []*Vote `json:"precommits"`
}
type Vote struct {
ValidatorAddress Address `json:"validator_address"`
ValidatorIndex int `json:"validator_index"`
@ -26,7 +30,9 @@ References:
[#2226](https://github.com/tendermint/tendermint/issues/2226)
## Proposed Solution
We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself.
```
type Commit struct {
Height int64
@ -34,42 +40,56 @@ type Commit struct {
BlockID BlockID `json:"block_id"`
Precommits []*CommitSig `json:"precommits"`
}
type CommitSig struct {
BlockID BlockIDFlag
ValidatorAddress Address
Signature []byte
Timestamp time.Time
Signature []byte
}
```
Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging.
## Status
Proposed
## Consequences
// indicate which BlockID the signature is for
type BlockIDFlag int
### Positive
The size of a `Commit` transmitted over the network goes from:
const (
BlockIDFlagAbsent BlockIDFlag = iota // vote is not included in the Commit.Precommits
BlockIDFlagCommit // voted for the Commit.BlockID
BlockIDFlagNil // voted for nil
)
|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|)
```
to:
Note the need for an extra byte to indicate whether the signature is for the BlockID or for nil.
This byte can also be used to indicate an absent vote, rather than using a nil object like we currently do,
which has been [problematic for compatibility between Amino and proto3](https://github.com/tendermint/go-amino/issues/260).
Note we also continue to store the `ValidatorAddress` in the `CommitSig`.
While this still takes 20 bytes per signature, it ensures that the Commit has all the
information necessary to reconstruct the Vote, which simplifies mapping between Commit and Vote objects
and aids debugging. It may also be necessary for the light client to know which address a signature corresponds to if
it is trying to verify a current commit with an older validator set.
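To make the reconstruction concrete, here is a rough sketch (not part of the proposal itself) of how a `Vote` could be rebuilt from the proposed `Commit`/`CommitSig` pair; the helper name `reconstructVote` is hypothetical and the remaining `Vote` fields follow the existing struct:
```
func reconstructVote(c Commit, idx int) Vote {
	cs := c.Precommits[idx]
	// shared fields come from the Commit, per-validator fields from the CommitSig;
	// a BlockIDFlagAbsent entry has no vote to reconstruct (not handled here)
	blockID := c.BlockID
	if cs.BlockID == BlockIDFlagNil {
		blockID = BlockID{} // this validator voted nil
	}
	return Vote{
		ValidatorAddress: cs.ValidatorAddress,
		ValidatorIndex:   idx,
		Height:           c.Height,
		Round:            c.Round,
		Timestamp:        cs.Timestamp,
		Type:             PrecommitType,
		BlockID:          blockID,
		Signature:        cs.Signature,
	}
}
```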
## Status
|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|)
Proposed
This saves:
## Consequences
n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round)
### Positive
In the current context, this would concretely be:
(assuming all ints are int64, and hashes are 32 bytes)
Removing the Type/Height/Round/Index and the BlockID saves roughly 80 bytes per precommit.
It varies because some integers are varint. The BlockID contains two 32-byte hashes and an integer,
and the Height is 8 bytes.
n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16
For a chain with 100 validators, that's up to 8kB in savings per block!
With 100 validators this is a savings of almost 10KB on every block.
### Negative
This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`.
- Large breaking change to the block and commit structure
- Requires differentiating in code between the Vote and CommitSig objects, which may add some complexity (votes need to be reconstructed to be verified and gossiped)
### Neutral
This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX.
- Commit.Precommits no longer contains nil values

+ 100
- 0
docs/architecture/adr-037-deliver-block.md View File

@ -0,0 +1,100 @@
# ADR 037: Deliver Block
Author: Daniil Lashin (@danil-lashin)
## Changelog
13-03-2019: Initial draft
## Context
Initial conversation: https://github.com/tendermint/tendermint/issues/2901
Some applications can handle transactions in parallel, or at least some
part of tx processing can be parallelized. Currently it is not possible for developers
to execute txs in parallel because Tendermint delivers them sequentially.
## Decision
Tendermint currently has `BeginBlock`, `EndBlock`, `Commit`, and `DeliverTx` steps
while executing a block. This doc proposes merging these steps into one `DeliverBlock`
step. It will allow application developers to decide how they want to
execute transactions (in parallel or sequentially). It will also simplify and
speed up communication between the application and Tendermint.
As @jaekwon [mentioned](https://github.com/tendermint/tendermint/issues/2901#issuecomment-477746128)
in the discussion, not all applications will benefit from this solution. In some cases,
when an application handles transactions sequentially, it may slow down the blockchain,
because the application needs to wait until the full block is transmitted before it can start
processing it. Also, a complete change of ABCI would force all the apps
to change their implementation completely. That's why I propose to introduce one more ABCI
type.
# Implementation Changes
In addition to the default application interface, which currently has this structure
```go
type Application interface {
// Info and Mempool methods...
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
```
this doc proposes to add one more:
```go
type Application interface {
// Info and Mempool methods...
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
DeliverBlock(RequestDeliverBlock) ResponseDeliverBlock // Deliver full block
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
type RequestDeliverBlock struct {
Hash []byte
Header Header
Txs Txs
LastCommitInfo LastCommitInfo
ByzantineValidators []Evidence
}
type ResponseDeliverBlock struct {
ValidatorUpdates []ValidatorUpdate
ConsensusParamUpdates *ConsensusParams
Tags []common.KVPair
TxResults []ResponseDeliverTx
}
```
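As a purely illustrative aside (not part of this proposal), the kind of parallel tx handling `DeliverBlock` would enable can be sketched with a self-contained snippet; `deliverBlockParallel` and `txResult` are hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// txResult loosely mirrors the per-tx result an application would report.
type txResult struct {
	Index int
	Code  uint32
}

// deliverBlockParallel fans independent txs out to goroutines, which the
// current one-DeliverTx-at-a-time flow does not allow.
func deliverBlockParallel(txs [][]byte) []txResult {
	results := make([]txResult, len(txs))
	var wg sync.WaitGroup
	for i, tx := range txs {
		wg.Add(1)
		go func(i int, tx []byte) {
			defer wg.Done()
			// application-specific, independent tx processing goes here
			results[i] = txResult{Index: i, Code: 0}
		}(i, tx)
	}
	wg.Wait()
	return results
}

func main() {
	fmt.Println(deliverBlockParallel([][]byte{[]byte("a"), []byte("b")}))
}
```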
Also, we will need to add a new config param, which will specify what kind of ABCI the application uses.
For example, it can be `abci_type`. Then we will have 2 types:
- `advanced` - current ABCI
- `simple` - proposed implementation
## Status
In review
## Consequences
### Positive
- much simpler introduction and tutorials for new developers (instead of implementing 5 methods they
will need to implement only 3)
- txs can be handled in parallel
- simpler interface
- faster communications between Tendermint and application
### Negative
- Tendermint should now support 2 kinds of ABCI

+ 534
- 0
docs/architecture/adr-040-blockchain-reactor-refactor.md View File

@ -0,0 +1,534 @@
# ADR 040: Blockchain Reactor Refactor
## Changelog
19-03-2019: Initial draft
## Context
The Blockchain Reactor's high level responsibility is to enable peers who are far behind the current state of the
blockchain to quickly catch up by downloading many blocks in parallel from its peers, verifying block correctness, and
executing them against the ABCI application. We call the protocol executed by the Blockchain Reactor `fast-sync`.
The current architecture diagram of the blockchain reactor can be found here:
![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)
The current architecture consists of dozens of routines and is tightly coupled to the `Switch`, making writing
unit tests almost impossible. Current tests require setting up complex dependency graphs and dealing with concurrency.
Note that having dozens of routines is in this case overkill, as most of the time routines sit idle waiting for
something to happen (a message to arrive or a timeout to expire). Due to the dependency on the `Switch`, testing relatively
complex network scenarios and failures (for example adding and removing peers) is a very complex task and frequently leads
to complex tests with non-deterministic behavior ([#3400]). The inability to write proper tests lowers confidence in
the code, and this has resulted in several issues (some are fixed in the meantime and some are still open):
[#3400], [#2897], [#2896], [#2699], [#2888], [#2457], [#2622], [#2026].
## Decision
To remedy these issues we plan a major refactor of the blockchain reactor. The proposed architecture is largely inspired
by ADR-30 and is presented on the following diagram:
![Blockchain Reactor Refactor Diagram](img/bc-reactor-refactor.png)
We suggest a concurrency architecture where the core algorithm (we call it `Controller`) is extracted into a finite
state machine. The active routine of the reactor is called `Executor` and is responsible for receiving and sending
messages from/to peers and triggering timeouts. What messages should be sent and timeouts triggered is determined mostly
by the `Controller`. The exception is the `Peer Heartbeat` mechanism, which is the `Executor`'s responsibility. The heartbeat
mechanism is used to remove slow and unresponsive peers from the peer list. Writing unit tests is simpler with
this architecture, as most of the critical logic is part of the `Controller` function. We expect that the simpler concurrency
architecture will not have a significant negative effect on the performance of this reactor (to be confirmed by
experimental evaluation).
### Implementation changes
We assume the following system model for "fast sync" protocol:
* a node is connected to a random subset of all nodes that represents its peer set. Some nodes are correct and some
might be faulty. We don't make assumptions about the ratio of faulty nodes, i.e., it is possible that all nodes in some
peer set are faulty.
* we assume that communication between correct nodes is synchronous, i.e., if a correct node `p` sends a message `m` to
a correct node `q` at time `t`, then `q` will receive the message at the latest at time `t+Delta`, where `Delta` is a system
parameter that is known by network participants. `Delta` is normally chosen to be an order of magnitude higher than
the real communication delay (maximum) between correct nodes. Therefore if a correct node `p` sends a request message
to a correct node `q` at time `t` and there is no corresponding reply by time `t + 2*Delta`, then `p` can assume
that `q` is faulty. Note that the network assumptions for the consensus reactor are different (we assume partially
synchronous model there).
The requirements for the "fast sync" protocol are formally specified as follows:
- `Correctness`: If a correct node `p` is connected to a correct node `q` for a long enough period of time, then `p`
  will eventually download all requested blocks from `q`.
- `Termination`: If a set of peers of a correct node `p` is stable (no new nodes are added to the peer set of `p`) for
  a long enough period of time, then the protocol eventually terminates.
- `Fairness`: A correct node `p` sends requests for blocks to all peers from its peer set.
As explained above, the `Executor` is responsible for sending and receiving messages that are part of the `fast-sync`
protocol. The following messages are exchanged as part of `fast-sync` protocol:
``` go
type Message int
const (
MessageUnknown Message = iota
MessageStatusRequest
MessageStatusResponse
MessageBlockRequest
MessageBlockResponse
)
```
`MessageStatusRequest` is sent periodically to all peers as a request for a peer to provide its current height. It is
part of the `Peer Heartbeat` mechanism, and a failure to respond to it in a timely manner results in a peer being removed
from the peer set. Note that the `Peer Heartbeat` mechanism is used only while a peer is in `fast-sync` mode. We assume
here the existence of a mechanism that gives a node the possibility to inform its peers that it is in `fast-sync` mode.
``` go
type MessageStatusRequest struct {
SeqNum int64 // sequence number of the request
}
```
`MessageStatusResponse` is sent as a response to `MessageStatusRequest` to inform the requester of the peer's current
height.
``` go
type MessageStatusResponse struct {
SeqNum int64 // sequence number of the corresponding request
Height int64 // current peer height
}
```
`MessageBlockRequest` is used to make a request for a block and the corresponding commit certificate at a given height.
``` go
type MessageBlockRequest struct {
Height int64
}
```
`MessageBlockResponse` is a response for the corresponding block request. In addition to providing the block and the
corresponding commit certificate, it also contains the current peer height.
``` go
type MessageBlockResponse struct {
Height int64
Block Block
Commit Commit
PeerHeight int64
}
```
In addition to sending and receiving messages and running the `HeartBeat` mechanism, the `Executor` also manages timeouts
that are triggered upon the `Controller`'s request. The `Controller` is then informed once a timeout expires.
``` go
type TimeoutTrigger int
const (
TimeoutUnknown TimeoutTrigger = iota
TimeoutResponseTrigger
TimeoutTerminationTrigger
)
```
The `Controller` can be modelled as a function with clearly defined inputs:
* `State` - current state of the node. Contains data about connected peers and their behavior, pending requests,
  received blocks, etc.
* `Event` - significant events in the network.
producing clear outputs:
* `State` - updated state of the node,
* `MessageToSend` - signal what message to send and to which peer
* `TimeoutTrigger` - signal that timeout should be triggered.
We consider the following `Event` types:
``` go
type Event int
const (
EventUnknown Event = iota
EventStatusReport
EventBlockRequest
EventBlockResponse
EventRemovePeer
EventTimeoutResponse
EventTimeoutTermination
)
```
`EventStatusResponse` event is generated once `MessageStatusResponse` is received by the `Executor`.
``` go
type EventStatusReport struct {
PeerID ID
Height int64
}
```
`EventBlockRequest` event is generated once `MessageBlockRequest` is received by the `Executor`.
``` go
type EventBlockRequest struct {
Height int64
PeerID p2p.ID
}
```
`EventBlockResponse` event is generated upon reception of `MessageBlockResponse` message by the `Executor`.
``` go
type EventBlockResponse struct {
Height int64
Block Block
Commit Commit
PeerID ID
PeerHeight int64
}
```
`EventRemovePeer` is generated by `Executor` to signal that the connection to a peer is closed due to peer misbehavior.
``` go
type EventRemovePeer struct {
PeerID ID
}
```
`EventTimeoutResponse` is generated by `Executor` to signal that a timeout triggered by `TimeoutResponseTrigger` has
expired.
``` go
type EventTimeoutResponse struct {
PeerID ID
Height int64
}
```
`EventTimeoutTermination` is generated by `Executor` to signal that a timeout triggered by `TimeoutTerminationTrigger`
has expired.
``` go
type EventTimeoutTermination struct {
Height int64
}
```
`MessageToSend` is just a wrapper around `Message` type that contains id of the peer to which message should be sent.
``` go
type MessageToSend struct {
PeerID ID
Message Message
}
```
The Controller state machine can be in two modes: `ModeFastSync` when
a node is trying to catch up with the network by downloading committed blocks,
and `ModeConsensus`, in which it executes the Tendermint consensus protocol. We
consider that `fast sync` mode terminates once the Controller switches to
`ModeConsensus`.
``` go
type Mode int
const (
ModeUnknown Mode = iota
ModeFastSync
ModeConsensus
)
```
`Controller` is managing the following state:
``` go
type ControllerState struct {
Height int64 // the first block that is not committed
Mode Mode // mode of operation
PeerMap map[ID]PeerStats // map of peer IDs to peer statistics
MaxRequestPending int64 // maximum height of the pending requests
FailedRequests []int64 // list of failed block requests
PendingRequestsNum int // total number of pending requests
Store []BlockInfo // contains list of downloaded blocks
Executor BlockExecutor // store, verify and executes blocks
}
```
`PeerStats` data structure keeps for every peer its current height and a list of pending requests for blocks.
``` go
type PeerStats struct {
Height int64
PendingRequest int64 // a request sent to this peer
}
```
`BlockInfo` data structure is used to store information (as part of block store) about downloaded blocks: from what peer
a block and the corresponding commit certificate are received.
``` go
type BlockInfo struct {
Block Block
Commit Commit
PeerID ID // a peer from which we received the corresponding Block and Commit
}
```
The `Controller` is initialized by providing an initial height (`startHeight`) from which it will start downloading
blocks from peers and the current state of the `BlockExecutor`.
``` go
func NewControllerState(startHeight int64, executor BlockExecutor) ControllerState {
state = ControllerState {}
state.Height = startHeight
state.Mode = ModeFastSync
state.MaxRequestPending = startHeight - 1
state.PendingRequestsNum = 0
state.Executor = executor
initialize state.PeerMap, state.FailedRequests and state.Store to empty data structures
return state
}
```
The core protocol logic is given with the following function:
``` go
func handleEvent(state ControllerState, event Event) (ControllerState, Message, TimeoutTrigger, Error) {
msg = nil
timeout = nil
error = nil
switch state.Mode {
case ModeConsensus:
switch event := event.(type) {
case EventBlockRequest:
msg = createBlockResponseMessage(state, event)
return state, msg, timeout, error
default:
error = "Only respond to BlockRequests while in ModeConsensus!"
return state, msg, timeout, error
}
case ModeFastSync:
switch event := event.(type) {
case EventBlockRequest:
msg = createBlockResponseMessage(state, event)
return state, msg, timeout, error
case EventStatusResponse:
return handleEventStatusResponse(event, state)
case EventRemovePeer:
return handleEventRemovePeer(event, state)
case EventBlockResponse:
return handleEventBlockResponse(event, state)
case EventResponseTimeout:
return handleEventResponseTimeout(event, state)
case EventTerminationTimeout:
// Termination timeout is triggered in case of empty peer set and in case there are no pending requests.
// If this timeout expires and in the meantime no new peers are added or new pending requests are made
// then `fast-sync` mode terminates by switching to `ModeConsensus`.
// Note that termination timeout should be higher than the response timeout.
if state.Height == event.Height && state.PendingRequestsNum == 0 { state.Mode = ModeConsensus }
return state, msg, timeout, error
default:
error = "Received unknown event type!"
return state, msg, timeout, error
}
}
}
```
``` go
func createBlockResponseMessage(state ControllerState, event BlockRequest) MessageToSend {
msgToSend = nil
if _, ok := state.PeerMap[event.PeerID]; !ok { peerStats = PeerStats{-1, -1} }
if state.Executor.ContainsBlockWithHeight(event.Height) && event.Height > peerStats.Height {
peerStats.Height = event.Height
msg = BlockResponseMessage{
Height: event.Height,
Block: state.Executor.getBlock(event.Height),
Commit: state.Executor.getCommit(event.Height),
PeerID: event.PeerID,
CurrentHeight: state.Height - 1,
}
msgToSend = MessageToSend { event.PeerID, msg }
}
state.PeerMap[event.PeerID] = peerStats
return msgToSend
}
```
``` go
func handleEventStatusResponse(event EventStatusResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
if _, ok := state.PeerMap[event.PeerID]; !ok {
peerStats = PeerStats{ -1, -1 }
} else {
peerStats = state.PeerMap[event.PeerID]
}
if event.Height > peerStats.Height { peerStats.Height = event.Height }
// if there are no pending requests for this peer, try to send him a request for block
if peerStats.PendingRequest == -1 {
msg = createBlockRequestMessages(state, event.PeerID, peerStats.Height)
// msg is nil if no request for block can be made to a peer at this point in time
if msg != nil {
peerStats.PendingRequest = msg.Height
state.PendingRequestsNum++
// when a request for a block is sent to a peer, a response timeout is triggered. If no corresponding block is sent by the peer
// during response timeout period, then the peer is considered faulty and is removed from the peer set.
timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
} else if state.PendingRequestsNum == 0 {
// if there are no pending requests and no new request can be placed to the peer, termination timeout is triggered.
// If termination timeout expires and we are still at the same height and there are no pending requests, the "fast-sync"
// mode is finished and we switch to `ModeConsensus`.
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
}
}
state.PeerMap[event.PeerID] = peerStats
return state, msg, timeout, error
}
```
``` go
func handleEventRemovePeer(event EventRemovePeer, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
if _, ok := state.PeerMap[event.PeerID]; ok {
pendingRequest = state.PeerMap[event.PeerID].PendingRequest
// if a peer is removed from the peer set, its pending request is declared failed and added to the `FailedRequests` list
// so it can be retried.
if pendingRequest != -1 {
add(state.FailedRequests, pendingRequest)
}
state.PendingRequestsNum--
delete(state.PeerMap, event.PeerID)
// if the peer set is empty after removal of this peer then termination timeout is triggered.
if state.PeerMap.isEmpty() {
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
}
} else { error = "Removing unknown peer!" }
return state, msg, timeout, error
}
```
``` go
func handleEventBlockResponse(event EventBlockResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
if _, ok := state.PeerMap[event.PeerID]; ok {
peerStats = state.PeerMap[event.PeerID]
// when expected block arrives from a peer, it is added to the store so it can be verified and if correct executed after.
if peerStats.PendingRequest == event.Height {
peerStats.PendingRequest = -1
state.PendingRequestsNum--
if event.PeerHeight > peerStats.Height { peerStats.Height = event.PeerHeight }
state.Store[event.Height] = BlockInfo{ event.Block, event.Commit, event.PeerID }
// blocks are verified sequentially so adding a block to the store does not mean that it will be immediately verified
// as some of the previous blocks might be missing.
state = verifyBlocks(state) // it can lead to event.PeerID being removed from peer list
if _, ok := state.PeerMap[event.PeerID]; ok {
// we try to identify new request for a block that can be asked to the peer
msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
if msg != nil {
peerStats.PendingRequest = msg.Height
state.PendingRequestsNum++
// if request for block is made, response timeout is triggered
timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
} else if state.PeerMap.isEmpty() || state.PendingRequestsNum == 0 {
// if the peer map is empty (the peer can be removed as block verification failed) or there are no pending requests
// termination timeout is triggered.
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
}
}
} else { error = "Received Block from wrong peer!" }
} else { error = "Received Block from unknown peer!" }
state.PeerMap[event.PeerID] = peerStats
return state, msg, timeout, error
}
```
``` go
func handleEventResponseTimeout(event, state) {
if _, ok := state.PeerMap[event.PeerID]; ok {
peerStats = state.PeerMap[event.PeerID]
// if a response timeout expires and the peer hasn't delivered the block, the peer is removed from the peer list and
// the request is added to the `FailedRequests` so the block can be downloaded from other peer
if peerStats.PendingRequest == event.Height {
add(state.FailedRequests, event.Height)
delete(state.PeerMap, event.PeerID)
state.PendingRequestsNum--
// if peer set is empty, then termination timeout is triggered
if state.PeerMap.isEmpty() {
timeout = TimeoutTrigger{ state.Height, TerminationTimeout }
}
}
}
return state, msg, timeout, error
}
```
``` go
func createBlockRequestMessage(state ControllerState, peerID ID, peerHeight int64) MessageToSend {
msg = nil
blockHeight = -1
r = find request in state.FailedRequests such that r <= peerHeight // returns `nil` if there are no such request
// if there is a height in failed requests that can be downloaded from the peer send request to it
if r != nil {
blockHeight = r
delete(state.FailedRequests, r)
} else if state.MaxRequestPending < peerHeight {
// if height of the maximum pending request is smaller than peer height, then ask peer for next block
state.MaxRequestPending++
blockHeight = state.MaxRequestPending // increment state.MaxRequestPending and then return the new value
}
if blockHeight > -1 { msg = MessageToSend { peerID, MessageBlockRequest { blockHeight } } }
return msg
}
```
``` go
func verifyBlocks(state State) State {
done = false
for !done {
block = state.Store[height]
if block != nil {
verified = verify block.Block using block.Commit // returns `true` if verification succeeds, `false` otherwise
if verified {
block.Execute() // executing block is costly operation so it might make sense executing asynchronously
state.Height++
} else {
// if block verification failed, then it is added to `FailedRequests` and the peer is removed from the peer set
add(state.FailedRequests, height)
state.Store[height] = nil
if _, ok := state.PeerMap[block.PeerID]; ok {
pendingRequest = state.PeerMap[block.PeerID].PendingRequest
// if there is a pending request sent to the peer that is just to be removed from the peer set, add it to `FailedRequests`
if pendingRequest != -1 {
add(state.FailedRequests, pendingRequest)
state.PendingRequestsNum--
}
delete(state.PeerMap, block.PeerID)
}
done = true
}
} else { done = true }
}
return state
}
```
In the proposed architecture the `Controller` is not an active task, i.e., it is called by the `Executor`. Depending on
the values returned by the `Controller`, the `Executor` will send a message to some peer (`msg` != nil), trigger a
timeout (`timeout` != nil) or deal with errors (`error` != nil).
In case a timeout is triggered, the `Executor` will provide the corresponding timeout event as an input to the `Controller`
once the timeout expires.
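For illustration only (not part of the ADR text), the dispatch the `Executor` performs on the `Controller`'s outputs might look roughly like the following pseudocode; `receiveEvent`, `sendToPeer`, `scheduleTimeout` and `log` are hypothetical helpers:
``` go
func executorLoop(state ControllerState) {
    for {
        event = receiveEvent() // peer message, timeout expiry, peer removal, ...
        state, msg, timeout, error = handleEvent(state, event)
        if error != nil { log(error); continue }
        if msg != nil { sendToPeer(msg) } // MessageToSend already names the target peer
        if timeout != nil { scheduleTimeout(timeout) } // expiry is fed back in as a timeout event
    }
}
```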
## Status
Draft.
## Consequences
### Positive
- isolated implementation of the algorithm
- improved testability - simpler to prove correctness
- clearer separation of concerns - easier to reason
### Negative
### Neutral

+ 29
- 0
docs/architecture/adr-041-proposer-selection-via-abci.md View File

@ -0,0 +1,29 @@
# ADR 041: Application should be in charge of validator set
## Changelog
## Context
Currently Tendermint is in charge of the validator set and proposer selection. The application can only update the validator set at EndBlock time.
To support the light client, the application should make sure at least 2/3 of the validators are the same at each round.
The application should have full control over validator set changes and proposer selection. In each round the application can provide the list of validators for the next rounds, in order, along with their power. The proposer is the first in the list; in case the proposer is offline, the next one can propose, and so on.
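As a non-normative sketch of what the ADR implies, the per-round response from the application could carry an ordered validator list along these lines; all names here are hypothetical:
``` go
type ValidatorWithPower struct {
    PubKey []byte
    Power  int64
}

// returned by the application each round; the first entry is the proposer,
// the second is the fallback if the proposer is offline, and so on.
type ResponseNextValidators struct {
    Validators []ValidatorWithPower
}
```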
## Decision
## Status
## Consequences
Tendermint is no longer in charge of the validator set and its changes. The application should provide the correct information.
However, Tendermint can provide a pseudo-randomness algorithm to help the application select the proposer in each round.
### Positive
### Negative
### Neutral
## References

BIN
docs/architecture/img/bc-reactor-refactor.png View File

Before After
Width: 1460  |  Height: 260  |  Size: 7.9 KiB

BIN
docs/architecture/img/bc-reactor.png View File

Before After
Width: 1520  |  Height: 1340  |  Size: 43 KiB

+ 5
- 7
docs/introduction/install.md View File

@ -79,9 +79,7 @@ make install
Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7).
### Ubuntu
Install LevelDB with snappy (optionally):
Install LevelDB with snappy (optionally). Below are commands for Ubuntu:
```
sudo apt-get update
@ -100,23 +98,23 @@ wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \
rm -f v1.20.tar.gz
```
Set database backend to cleveldb:
Set the database backend to `cleveldb`:
```
# config/config.toml
db_backend = "cleveldb"
```
To install Tendermint, run
To install Tendermint, run:
```
CGO_LDFLAGS="-lsnappy" make install_c
```
or run
or run:
```
CGO_LDFLAGS="-lsnappy" make build_c
```
to put the binary in `./build`.
which puts the binary in `./build`.

+ 9
- 7
docs/networks/terraform-and-ansible.md View File

@ -62,16 +62,18 @@ There are several roles that are self-explanatory:
First, we configure our droplets by specifying the paths for tendermint
(`BINARY`) and the node files (`CONFIGDIR`). The latter expects any
number of directories named `node0, node1, ...` and so on (equal to the
number of droplets created). For this example, we use pre-created files
from [this
directory](https://github.com/tendermint/tendermint/tree/master/docs/examples).
To create your own files, use either the `tendermint testnet` command or
review [manual deployments](./deploy-testnets.md).
number of droplets created).
Here's the command to run:
To create the node files run:
```
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
tendermint testnet
```
Then, to configure our droplets run:
```
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet
```
Voila! All your droplets now have the `tendermint` binary and required


+ 1
- 1
docs/spec/abci/apps.md View File

@ -265,7 +265,7 @@ This is enforced by Tendermint consensus.
If a block includes evidence older than this, the block will be rejected
(validators won't vote for it).
Must have `0 < MaxAge`.
Must have `MaxAge > 0`.
### Updates


+ 6
- 6
docs/spec/p2p/config.md View File

@ -12,14 +12,14 @@ and upon incoming connection shares some peers and disconnects.
## Seeds
`--p2p.seeds “1.2.3.4:26656,2.3.4.5:4444”`
`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”`
Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
If we already have enough peers in the address book, we may never need to dial them.
## Persistent Peers
`--p2p.persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
Dial these peers and auto-redial them if the connection fails.
These are intended to be trusted persistent peers that can help
@ -30,9 +30,9 @@ backoff and will give up after a day of trying to connect.
the user will be warned that seeds may auto-close connections
and that the node may not be able to keep the connection persistent.
## Private Persistent Peers
## Private Peers
`--p2p.private_persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
These are persistent peers that we do not add to the address book or
gossip to other peers. They stay private to us.
These are IDs of the peers that we do not add to the address book or gossip to
other peers. They stay private to us.

+ 11
- 8
docs/spec/reactors/pex/pex.md View File

@ -21,17 +21,20 @@ inbound (they dialed our public address) or outbound (we dialed them).
## Discovery
Peer discovery begins with a list of seeds.
When we have no peers, or have been unable to find enough peers from existing ones,
we dial a randomly selected seed to get a list of peers to dial.
When we don't have enough peers, we
1. ask existing peers
2. dial seeds if we're not dialing anyone currently
On startup, we will also immediately dial the given list of `persistent_peers`,
and will attempt to maintain persistent connections with them. If the connections die, or we fail to dial,
we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
and after about a day of trying, stop dialing the peer.
and will attempt to maintain persistent connections with them. If the
connections die, or we fail to dial, we will redial every 5s for a few minutes,
then switch to an exponential backoff schedule, and after about a day of
trying, stop dialing the peer.
So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers
from each of our own. If sufficient time goes by and we still can't find enough peers,
we try the seeds again.
As long as we have less than `MaxNumOutboundPeers`, we periodically request
additional peers from each of our existing peers and try the seeds.
## Listening


+ 13
- 2
docs/tendermint-core/configuration.md View File

@ -30,8 +30,19 @@ moniker = "anonymous"
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use-cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"


+ 1
- 1
docs/tendermint-core/running-in-production.md View File

@ -8,7 +8,7 @@ key-value database. Unfortunately, this implementation of LevelDB seems to suffe
install the real C-implementation of LevelDB and compile Tendermint to use
that using `make build_c`. See the [install instructions](../introduction/install.md) for details.
Tendermint keeps multiple distinct LevelDB databases in the `$TMROOT/data`:
Tendermint keeps multiple distinct databases in the `$TMROOT/data`:
- `blockstore.db`: Keeps the entire blockchain - stores blocks,
block commits, and block meta data, each indexed by height. Used to sync new


+ 0
- 5
evidence/reactor.go View File

@ -60,11 +60,6 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
go evR.broadcastEvidenceRoutine(peer)
}
// RemovePeer implements Reactor.
func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// nothing to do
}
// Receive implements Reactor.
// It adds any received evidence to the evpool.
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {


+ 1
- 1
libs/README.md View File

@ -13,7 +13,7 @@ CLI wraps the `cobra` and `viper` packages and handles some common elements of b
## clist
Clist provides a linekd list that is safe for concurrent access by many readers.
Clist provides a linked list that is safe for concurrent access by many readers.
## common


+ 3
- 1
libs/clist/clist_test.go View File

@ -261,6 +261,8 @@ func TestWaitChan(t *testing.T) {
pushed++
time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond)
}
// apply a deterministic pause so the counter has time to catch up
time.Sleep(25 * time.Millisecond)
close(done)
}()
@ -273,7 +275,7 @@ FOR_LOOP:
next = next.Next()
seen++
if next == nil {
continue
t.Fatal("Next should not be nil when waiting on NextWaitChan")
}
case <-done:
break FOR_LOOP


+ 0
- 32
libs/common/errors.go View File

@ -212,35 +212,3 @@ func (fe FmtError) String() string {
func (fe FmtError) Format() string {
return fe.format
}
//----------------------------------------
// Panic wrappers
// XXX DEPRECATED
// A panic resulting from a sanity check means there is a programmer error
// and some guarantee is not satisfied.
// XXX DEPRECATED
func PanicSanity(v interface{}) {
panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v))
}
// A panic here means something has gone horribly wrong, in the form of data corruption or
// failure of the operating system. In a correct/healthy system, these should never fire.
// If they do, it's indicative of a much more serious problem.
// XXX DEPRECATED
func PanicCrisis(v interface{}) {
panic(fmt.Sprintf("Panicked on a Crisis: %v", v))
}
// Indicates a failure of consensus. Someone was malicious or something has
// gone horribly wrong. These should really boot us into an "emergency-recover" mode
// XXX DEPRECATED
func PanicConsensus(v interface{}) {
panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v))
}
// For those times when we're not sure if we should panic
// XXX DEPRECATED
func PanicQ(v interface{}) {
panic(fmt.Sprintf("Panicked questionably: %v", v))
}

+ 1
- 1
libs/common/random.go View File

@ -300,7 +300,7 @@ func cRandBytes(numBytes int) []byte {
b := make([]byte, numBytes)
_, err := crand.Read(b)
if err != nil {
PanicCrisis(err)
panic(err)
}
return b
}

+ 1
- 2
libs/common/service.go View File

@ -194,8 +194,7 @@ func (bs *BaseService) Reset() error {
// OnReset implements Service by panicking.
func (bs *BaseService) OnReset() error {
PanicSanity("The service cannot be reset")
return nil
panic("The service cannot be reset")
}
// IsRunning implements Service by returning true or false depending on the


+ 1
- 0
libs/db/backend_test.go View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)


+ 353
- 0
libs/db/boltdb.go View File

@ -0,0 +1,353 @@
// +build boltdb
package db
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/etcd-io/bbolt"
)
var bucket = []byte("tm")
func init() {
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
return NewBoltDB(name, dir)
}, false)
}
// BoltDB is a wrapper around etcd's fork of bolt
// (https://github.com/etcd-io/bbolt).
//
// NOTE: All operations (including Set, Delete) are synchronous by default. One
// can globally turn it off by using NoSync config option (not recommended).
//
// A single bucket ([]byte("tm")) is used per database instance. This could
// lead to performance issues when/if there will be lots of keys.
type BoltDB struct {
db *bbolt.DB
}
// NewBoltDB returns a BoltDB with default options.
func NewBoltDB(name, dir string) (DB, error) {
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
}
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
// supported because NewBoltDBWithOpts creates a global bucket.
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
if opts.ReadOnly {
return nil, errors.New("ReadOnly: true is not supported")
}
dbPath := filepath.Join(dir, name+".db")
db, err := bbolt.Open(dbPath, os.ModePerm, opts)
if err != nil {
return nil, err
}
// create a global bucket
err = db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
if err != nil {
return nil, err
}
return &BoltDB{db: db}, nil
}
func (bdb *BoltDB) Get(key []byte) (value []byte) {
key = nonEmptyKey(nonNilBytes(key))
err := bdb.db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
value = b.Get(key)
return nil
})
if err != nil {
panic(err)
}
return
}
func (bdb *BoltDB) Has(key []byte) bool {
return bdb.Get(key) != nil
}
func (bdb *BoltDB) Set(key, value []byte) {
key = nonEmptyKey(nonNilBytes(key))
value = nonNilBytes(value)
err := bdb.db.Update(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
return b.Put(key, value)
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) SetSync(key, value []byte) {
bdb.Set(key, value)
}
func (bdb *BoltDB) Delete(key []byte) {
key = nonEmptyKey(nonNilBytes(key))
err := bdb.db.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(bucket).Delete(key)
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) DeleteSync(key []byte) {
bdb.Delete(key)
}
func (bdb *BoltDB) Close() {
bdb.db.Close()
}
func (bdb *BoltDB) Print() {
stats := bdb.db.Stats()
fmt.Printf("%v\n", stats)
err := bdb.db.View(func(tx *bbolt.Tx) error {
tx.Bucket(bucket).ForEach(func(k, v []byte) error {
fmt.Printf("[%X]:\t[%X]\n", k, v)
return nil
})
return nil
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) Stats() map[string]string {
stats := bdb.db.Stats()
m := make(map[string]string)
// Freelist stats
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
// Transaction stats
m["TxN"] = fmt.Sprintf("%v", stats.TxN)
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
return m
}
// boltDBBatch buffers key/value operations in a slice and dumps them to the
// underlying DB upon a Write call.
type boltDBBatch struct {
buffer []struct {
k []byte
v []byte
}
db *BoltDB
}
// NewBatch returns a new batch.
func (bdb *BoltDB) NewBatch() Batch {
return &boltDBBatch{
buffer: make([]struct {
k []byte
v []byte
}, 0),
db: bdb,
}
}
// It is safe to modify the contents of the argument after Set returns but not
// before.
func (bdb *boltDBBatch) Set(key, value []byte) {
bdb.buffer = append(bdb.buffer, struct {
k []byte
v []byte
}{
key, value,
})
}
// It is safe to modify the contents of the argument after Delete returns but
// not before.
func (bdb *boltDBBatch) Delete(key []byte) {
for i, elem := range bdb.buffer {
if bytes.Equal(elem.k, key) {
// delete without preserving order
bdb.buffer[i] = bdb.buffer[len(bdb.buffer)-1]
bdb.buffer = bdb.buffer[:len(bdb.buffer)-1]
return
}
}
}
// NOTE: the operation is synchronous (see BoltDB for reasons)
func (bdb *boltDBBatch) Write() {
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
for _, elem := range bdb.buffer {
if putErr := b.Put(elem.k, elem.v); putErr != nil {
return putErr
}
}
return nil
})
if err != nil {
panic(err)
}
}
func (bdb *boltDBBatch) WriteSync() {
bdb.Write()
}
func (bdb *boltDBBatch) Close() {}
// WARNING: Any concurrent writes or reads will block until the iterator is
// closed.
func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
tx, err := bdb.db.Begin(false)
if err != nil {
panic(err)
}
return newBoltDBIterator(tx, start, end, false)
}
// WARNING: Any concurrent writes or reads will block until the iterator is
// closed.
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
tx, err := bdb.db.Begin(false)
if err != nil {
panic(err)
}
return newBoltDBIterator(tx, start, end, true)
}
// boltDBIterator allows you to iterate over a range of keys/values given some
// start / end keys (nil & nil will result in a full scan).
type boltDBIterator struct {
tx *bbolt.Tx
itr *bbolt.Cursor
start []byte
end []byte
currentKey []byte
currentValue []byte
isInvalid bool
isReverse bool
}
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
itr := tx.Bucket(bucket).Cursor()
var ck, cv []byte
if isReverse {
if end == nil {
ck, cv = itr.Last()
} else {
_, _ = itr.Seek(end) // after key
ck, cv = itr.Prev() // return to end key
}
} else {
if start == nil {
ck, cv = itr.First()
} else {
ck, cv = itr.Seek(start)
}
}
return &boltDBIterator{
tx: tx,
itr: itr,
start: start,
end: end,
currentKey: ck,
currentValue: cv,
isReverse: isReverse,
isInvalid: false,
}
}
func (itr *boltDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
func (itr *boltDBIterator) Valid() bool {
if itr.isInvalid {
return false
}
// iterated to the end of the cursor
if len(itr.currentKey) == 0 {
itr.isInvalid = true
return false
}
if itr.isReverse {
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
itr.isInvalid = true
return false
}
} else {
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
itr.isInvalid = true
return false
}
}
// Valid
return true
}
func (itr *boltDBIterator) Next() {
itr.assertIsValid()
if itr.isReverse {
itr.currentKey, itr.currentValue = itr.itr.Prev()
} else {
itr.currentKey, itr.currentValue = itr.itr.Next()
}
}
func (itr *boltDBIterator) Key() []byte {
itr.assertIsValid()
return itr.currentKey
}
func (itr *boltDBIterator) Value() []byte {
itr.assertIsValid()
return itr.currentValue
}
func (itr *boltDBIterator) Close() {
err := itr.tx.Rollback()
if err != nil {
panic(err)
}
}
func (itr *boltDBIterator) assertIsValid() {
if !itr.Valid() {
panic("Boltdb-iterator is invalid")
}
}
// nonEmptyKey returns a []byte("nil") if key is empty.
// WARNING: this may collide with a user key equal to "nil"!
func nonEmptyKey(key []byte) []byte {
if len(key) == 0 {
return []byte("nil")
}
return key
}
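A minimal usage sketch of the wrapper above, assuming a binary built with the boltdb tag; it uses only methods shown in this file (Set, NewBatch, Iterator, Close):
package example

import (
    dbm "github.com/tendermint/tendermint/libs/db"
)

func boltExample() error {
    db, err := dbm.NewBoltDB("example", "/tmp")
    if err != nil {
        return err
    }
    defer db.Close()

    // Single writes go through an Update transaction internally.
    db.Set([]byte("a"), []byte("1"))

    // Batched writes are flushed in one bolt Batch call (synchronous, per the
    // NOTE on boltDBBatch.Write).
    batch := db.NewBatch()
    batch.Set([]byte("b"), []byte("2"))
    batch.Set([]byte("c"), []byte("3"))
    batch.Write()
    batch.Close()

    // nil/nil start and end keys mean a full scan; remember that writes block
    // until the iterator (and its read transaction) is closed.
    it := db.Iterator(nil, nil)
    defer it.Close()
    for ; it.Valid(); it.Next() {
        _ = it.Key()
        _ = it.Value()
    }
    return nil
}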

+ 37
- 0
libs/db/boltdb_test.go View File

@ -0,0 +1,37 @@
// +build boltdb
package db
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestBoltDBNewBoltDB(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
defer cleanupDBDir(dir, name)
db, err := NewBoltDB(name, dir)
require.NoError(t, err)
db.Close()
}
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
db, err := NewBoltDB(name, "")
if err != nil {
b.Fatal(err)
}
defer func() {
db.Close()
cleanupDBDir("", name)
}()
benchmarkRandomReadsWrites(b, db)
}

+ 1
- 1
libs/db/c_level_db.go View File

@ -1,4 +1,4 @@
// +build gcc
// +build cleveldb
package db


+ 3
- 3
libs/db/c_level_db_test.go View File

@ -1,4 +1,4 @@
// +build gcc
// +build cleveldb
package db
@ -93,7 +93,7 @@ func TestCLevelDBBackend(t *testing.T) {
// Can't use "" (current directory) or "./" here because levigo.Open returns:
// "Error initializing DB: IO error: test_XXX.db: Invalid argument"
dir := os.TempDir()
db := NewDB(name, LevelDBBackend, dir)
db := NewDB(name, CLevelDBBackend, dir)
defer cleanupDBDir(dir, name)
_, ok := db.(*CLevelDB)
@ -103,7 +103,7 @@ func TestCLevelDBBackend(t *testing.T) {
func TestCLevelDBStats(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
db := NewDB(name, LevelDBBackend, dir)
db := NewDB(name, CLevelDBBackend, dir)
defer cleanupDBDir(dir, name)
assert.NotEmpty(t, db.Stats())


+ 66
- 0
libs/db/common_test.go View File

@ -1,6 +1,8 @@
package db
import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"sync"
@ -8,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
//----------------------------------------
@ -188,3 +191,66 @@ func (mockIterator) Value() []byte {
func (mockIterator) Close() {
}
func benchmarkRandomReadsWrites(b *testing.B, db DB) {
b.StopTimer()
// create dummy data
const numItems = int64(1000000)
internal := map[int64]int64{}
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
// fmt.Println("ok, starting")
b.StartTimer()
for i := 0; i < b.N; i++ {
// Write something
{
idx := int64(cmn.RandInt()) % numItems
internal[idx]++
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := int642Bytes(int64(val))
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
db.Set(idxBytes, valBytes)
}
// Read something
{
idx := int64(cmn.RandInt()) % numItems
valExp := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
if valExp == 0 {
if !bytes.Equal(valBytes, nil) {
b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
break
}
} else {
if len(valBytes) != 8 {
b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
break
}
valGot := bytes2Int64(valBytes)
if valExp != valGot {
b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
break
}
}
}
}
}
func int642Bytes(i int64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(i))
return buf
}
func bytes2Int64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}

+ 27
- 7
libs/db/db.go View File

@ -5,17 +5,37 @@ import (
"strings"
)
//----------------------------------------
// Main entry
type DBBackendType string
// These are valid backend types.
const (
LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc
CLevelDBBackend DBBackendType = "cleveldb"
// LevelDBBackend is a legacy type. Defaults to goleveldb unless cleveldb
// build tag was used, in which case it becomes cleveldb.
// Deprecated: Use concrete types (goleveldb, cleveldb, etc.)
LevelDBBackend DBBackendType = "leveldb"
// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
// popular implementation)
// - pure go
// - stable
GoLevelDBBackend DBBackendType = "goleveldb"
MemDBBackend DBBackendType = "memdb"
FSDBBackend DBBackendType = "fsdb" // using the filesystem naively
// CLevelDBBackend represents cleveldb (uses levigo wrapper)
// - fast
// - requires gcc
// - use cleveldb build tag (go build -tags cleveldb)
CLevelDBBackend DBBackendType = "cleveldb"
// MemDBBackend represents an in-memory key-value store, which is mostly used
// for testing.
MemDBBackend DBBackendType = "memdb"
// FSDBBackend represents a filesystem database
// - EXPERIMENTAL
// - slow
FSDBBackend DBBackendType = "fsdb"
// BoltDBBackend represents bolt (uses etcd's fork of bolt -
// github.com/etcd-io/bbolt)
// - EXPERIMENTAL
// - may be faster in some use-cases (random reads - indexer)
// - use boltdb build tag (go build -tags boltdb)
BoltDBBackend DBBackendType = "boltdb"
)
type dbCreator func(name string, dir string) (DB, error)
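A short sketch of picking one of the backends above through NewDB (the store name and directory are placeholders); BoltDBBackend only resolves in binaries built with the boltdb tag, since that is when its creator is registered:
package example

import (
    dbm "github.com/tendermint/tendermint/libs/db"
)

func openStore() dbm.DB {
    // Looks up the registered creator for the backend type; using
    // BoltDBBackend in a binary built without -tags boltdb will fail because
    // no creator was registered for it.
    return dbm.NewDB("blockstore", dbm.BoltDBBackend, "/tmp/data")
}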


+ 1
- 1
libs/db/fsdb.go View File

@ -20,7 +20,7 @@ const (
)
func init() {
registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) {
registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
dbPath := filepath.Join(dir, name+".db")
return NewFSDB(dbPath), nil
}, false)


+ 4
- 6
libs/db/go_level_db.go View File

@ -9,8 +9,6 @@ import (
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
cmn "github.com/tendermint/tendermint/libs/common"
)
func init() {
@ -67,7 +65,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) {
value = nonNilBytes(value)
err := db.db.Put(key, value, nil)
if err != nil {
cmn.PanicCrisis(err)
panic(err)
}
}
@ -77,7 +75,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) {
value = nonNilBytes(value)
err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
if err != nil {
cmn.PanicCrisis(err)
panic(err)
}
}
@ -86,7 +84,7 @@ func (db *GoLevelDB) Delete(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, nil)
if err != nil {
cmn.PanicCrisis(err)
panic(err)
}
}
@ -95,7 +93,7 @@ func (db *GoLevelDB) DeleteSync(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
if err != nil {
cmn.PanicCrisis(err)
panic(err)
}
}


+ 17
- 78
libs/db/go_level_db_test.go View File

@ -1,29 +1,27 @@
package db
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestNewGoLevelDB(t *testing.T) {
func TestGoLevelDBNewGoLevelDB(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
// Test write locks
db, err := NewGoLevelDB(name, "")
defer cleanupDBDir("", name)
// Test we can't open the db twice for writing
wr1, err := NewGoLevelDB(name, "")
require.Nil(t, err)
defer os.RemoveAll("./" + name + ".db")
_, err = NewGoLevelDB(name, "")
require.NotNil(t, err)
db.Close() // Close the db to release the lock
wr1.Close() // Close the db to release the lock
// Open the db twice in a row to test read-only locks
// Test we can open the db twice for reading only
ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
defer ro1.Close()
require.Nil(t, err)
@ -32,75 +30,16 @@ func TestNewGoLevelDB(t *testing.T) {
require.Nil(t, err)
}
func BenchmarkRandomReadsWrites(b *testing.B) {
b.StopTimer()
numItems := int64(1000000)
internal := map[int64]int64{}
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "")
func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
db, err := NewGoLevelDB(name, "")
if err != nil {
b.Fatal(err.Error())
return
b.Fatal(err)
}
defer func() {
db.Close()
cleanupDBDir("", name)
}()
fmt.Println("ok, starting")
b.StartTimer()
for i := 0; i < b.N; i++ {
// Write something
{
idx := (int64(cmn.RandInt()) % numItems)
internal[idx]++
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := int642Bytes(int64(val))
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
db.Set(
idxBytes,
valBytes,
)
}
// Read something
{
idx := (int64(cmn.RandInt()) % numItems)
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
if val == 0 {
if !bytes.Equal(valBytes, nil) {
b.Errorf("Expected %v for %v, got %X",
nil, idx, valBytes)
break
}
} else {
if len(valBytes) != 8 {
b.Errorf("Expected length 8 for %v, got %X",
idx, valBytes)
break
}
valGot := bytes2Int64(valBytes)
if val != valGot {
b.Errorf("Expected %v for %v, got %v",
val, idx, valGot)
break
}
}
}
}
db.Close()
}
func int642Bytes(i int64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(i))
return buf
}
func bytes2Int64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
benchmarkRandomReadsWrites(b, db)
}

+ 1
- 1
libs/db/mem_db.go View File

@ -7,7 +7,7 @@ import (
)
func init() {
registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) {
registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
return NewMemDB(), nil
}, false)
}


+ 1
- 1
libs/db/remotedb/remotedb_test.go View File

@ -28,7 +28,7 @@ func TestRemoteDB(t *testing.T) {
client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
require.Nil(t, err, "expecting a successful client creation")
dbName := "test-remote-db"
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"}))
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
defer func() {
err := os.RemoveAll(dbName + ".db")
if err != nil {


+ 5
- 0
libs/db/util_test.go View File

@ -22,6 +22,11 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) {
// Empty iterator for db populated after iterator created.
func TestPrefixIteratorNoMatch1(t *testing.T) {
for backend := range backends {
if backend == BoltDBBackend {
t.Log("bolt does not support concurrent writes while iterating")
continue
}
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
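A sketch of working around that limitation: finish and Close the iterator before issuing writes. Copying the keys is a conservative assumption, since bolt-backed iterators may reuse the underlying byte slices once the read transaction ends.
package example

import dbm "github.com/tendermint/tendermint/libs/db"

// deleteAll collects keys under the read transaction, closes it, and only
// then deletes, so the writes never overlap with the open iterator.
func deleteAll(db dbm.DB) {
    it := db.Iterator(nil, nil)
    var keys [][]byte
    for ; it.Valid(); it.Next() {
        keys = append(keys, append([]byte(nil), it.Key()...)) // copy the key
    }
    it.Close()
    for _, k := range keys {
        db.Delete(k)
    }
}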


+ 5
- 2
libs/pubsub/pubsub_test.go View File

@ -46,13 +46,16 @@ func TestSubscribe(t *testing.T) {
err = s.Publish(ctx, "Asylum")
assert.NoError(t, err)
err = s.Publish(ctx, "Ivan")
assert.NoError(t, err)
}()
select {
case <-published:
assertReceive(t, "Quicksilver", subscription.Out())
assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
case <-time.After(100 * time.Millisecond):
case <-time.After(3 * time.Second):
t.Fatal("Expected Publish(Asylum) not to block")
}
}
@ -101,7 +104,7 @@ func TestSubscribeUnbuffered(t *testing.T) {
select {
case <-published:
t.Fatal("Expected Publish(Darkhawk) to block")
case <-time.After(100 * time.Millisecond):
case <-time.After(3 * time.Second):
assertReceive(t, "Ultron", subscription.Out())
assertReceive(t, "Darkhawk", subscription.Out())
}


+ 88
- 13
lite/proxy/proxy.go View File

@ -5,12 +5,15 @@ import (
"net/http"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/libs/log"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
rpcclient "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tendermint/types"
)
const (
@ -66,21 +69,93 @@ func RPCRoutes(c rpcclient.Client) map[string]*rpcserver.RPCFunc {
"unsubscribe_all": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeAllWS, ""),
// info API
"status": rpcserver.NewRPCFunc(c.Status, ""),
"blockchain": rpcserver.NewRPCFunc(c.BlockchainInfo, "minHeight,maxHeight"),
"genesis": rpcserver.NewRPCFunc(c.Genesis, ""),
"block": rpcserver.NewRPCFunc(c.Block, "height"),
"commit": rpcserver.NewRPCFunc(c.Commit, "height"),
"tx": rpcserver.NewRPCFunc(c.Tx, "hash,prove"),
"validators": rpcserver.NewRPCFunc(c.Validators, "height"),
"status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height"),
// broadcast API
"broadcast_tx_commit": rpcserver.NewRPCFunc(c.BroadcastTxCommit, "tx"),
"broadcast_tx_sync": rpcserver.NewRPCFunc(c.BroadcastTxSync, "tx"),
"broadcast_tx_async": rpcserver.NewRPCFunc(c.BroadcastTxAsync, "tx"),
"broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"),
"broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx"),
"broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx"),
// abci API
"abci_query": rpcserver.NewRPCFunc(c.ABCIQuery, "path,data"),
"abci_info": rpcserver.NewRPCFunc(c.ABCIInfo, ""),
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data"),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
}
}
func makeStatusFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return c.Status()
}
}
func makeBlockchainInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return c.BlockchainInfo(minHeight, maxHeight)
}
}
func makeGenesisFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
return c.Genesis()
}
}
func makeBlockFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
return c.Block(height)
}
}
func makeCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
return c.Commit(height)
}
}
func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
return c.Tx(hash, prove)
}
}
func makeValidatorsFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) {
return c.Validators(height)
}
}
func makeBroadcastTxCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return c.BroadcastTxCommit(tx)
}
}
func makeBroadcastTxSyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxSync(tx)
}
}
func makeBroadcastTxAsyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxAsync(tx)
}
}
func makeABCIQueryFunc(c rpcclient.Client) func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
return func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
return c.ABCIQuery(path, data)
}
}
func makeABCIInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
return c.ABCIInfo()
}
}
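The change above wraps each client method in a closure whose first parameter is the new *rpctypes.Context. A generic sketch of that adapter pattern, using hypothetical stand-in types rather than the Tendermint API:
package example

// reqContext stands in for *rpctypes.Context; statusClient stands in for
// rpcclient.Client. Only the shape of the wrapping matters here.
type reqContext struct{}

type statusClient interface {
    Status() (string, error)
}

// makeStatusFunc lifts a context-less client method into the ctx-first
// signature the RPC server now expects.
func makeStatusFunc(c statusClient) func(ctx *reqContext) (string, error) {
    return func(ctx *reqContext) (string, error) {
        return c.Status()
    }
}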

+ 1
- 1
lite/proxy/verifier.go View File

@ -14,7 +14,7 @@ func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logge
logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)
memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize)
lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir))
lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir))
trust := lite.NewMultiProvider(
memProvider,
lvlProvider,


+ 2
- 1
mempool/cache_test.go View File

@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
@ -66,7 +67,7 @@ func TestCacheAfterUpdate(t *testing.T) {
tx := types.Tx{byte(v)}
updateTxs = append(updateTxs, tx)
}
mempool.Update(int64(tcIndex), updateTxs, nil, nil)
mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
for _, v := range tc.reAddIndices {
tx := types.Tx{byte(v)}
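A hedged sketch of what the new Update signature expects: one *abci.ResponseDeliverTx per committed tx (abciResponses above is a test helper that builds such a slice); mem, height and txs here are placeholders.
package example

import (
    abci "github.com/tendermint/tendermint/abci/types"
    mempl "github.com/tendermint/tendermint/mempool"
    "github.com/tendermint/tendermint/types"
)

// updateMempool builds one DeliverTx response per committed tx and hands the
// batch to Update. Lock/Unlock are managed by the caller, as the Mempool
// interface notes.
func updateMempool(mem mempl.Mempool, height int64, txs types.Txs) error {
    resps := make([]*abci.ResponseDeliverTx, len(txs))
    for i := range txs {
        resps[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
    }
    mem.Lock()
    defer mem.Unlock()
    return mem.Update(height, txs, resps, nil, nil)
}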


+ 712
- 0
mempool/clist_mempool.go View File

@ -0,0 +1,712 @@
package mempool
import (
"bytes"
"container/list"
"crypto/sha256"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
auto "github.com/tendermint/tendermint/libs/autofile"
"github.com/tendermint/tendermint/libs/clist"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
//--------------------------------------------------------------------------------
// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
config *cfg.MempoolConfig
proxyMtx sync.Mutex
proxyAppConn proxy.AppConnMempool
txs *clist.CList // concurrent linked-list of good txs
preCheck PreCheckFunc
postCheck PostCheckFunc
// Track whether we're rechecking txs.
// These are not protected by a mutex and are expected to be mutated
// in serial (ie. by abci responses which are called in serial).
recheckCursor *clist.CElement // next expected response
recheckEnd *clist.CElement // re-checking stops here
// notify listeners (ie. consensus) when txs are available
notifiedTxsAvailable bool
txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
// Map for quick access to txs to record sender in CheckTx.
// txsMap: txKey -> CElement
txsMap sync.Map
// Atomic integers
height int64 // the last block Update()'d to
rechecking int32 // for re-checking filtered txs on Update()
txsBytes int64 // total size of mempool, in bytes
// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
cache txCache
// A log of mempool txs
wal *auto.AutoFile
logger log.Logger
metrics *Metrics
}
var _ Mempool = &CListMempool{}
// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)
// NewCListMempool returns a new mempool with the given configuration and connection to an application.
func NewCListMempool(
config *cfg.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
options ...CListMempoolOption,
) *CListMempool {
mempool := &CListMempool{
config: config,
proxyAppConn: proxyAppConn,
txs: clist.New(),
height: height,
rechecking: 0,
recheckCursor: nil,
recheckEnd: nil,
logger: log.NewNopLogger(),
metrics: NopMetrics(),
}
if config.CacheSize > 0 {
mempool.cache = newMapTxCache(config.CacheSize)
} else {
mempool.cache = nopTxCache{}
}
proxyAppConn.SetResponseCallback(mempool.globalCb)
for _, option := range options {
option(mempool)
}
return mempool
}
// NOTE: not thread safe - should only be called once, on startup
func (mem *CListMempool) EnableTxsAvailable() {
mem.txsAvailable = make(chan struct{}, 1)
}
// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
mem.logger = l
}
// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx.
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
return func(mem *CListMempool) { mem.preCheck = f }
}
// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx.
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
return func(mem *CListMempool) { mem.postCheck = f }
}
// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CListMempoolOption {
return func(mem *CListMempool) { mem.metrics = metrics }
}
// *panics* if can't create directory or open file.
// *not thread safe*
func (mem *CListMempool) InitWAL() {
walDir := mem.config.WalDir()
err := cmn.EnsureDir(walDir, 0700)
if err != nil {
panic(errors.Wrap(err, "Error ensuring WAL dir"))
}
af, err := auto.OpenAutoFile(walDir + "/wal")
if err != nil {
panic(errors.Wrap(err, "Error opening WAL file"))
}
mem.wal = af
}
func (mem *CListMempool) CloseWAL() {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
if err := mem.wal.Close(); err != nil {
mem.logger.Error("Error closing WAL", "err", err)
}
mem.wal = nil
}
func (mem *CListMempool) Lock() {
mem.proxyMtx.Lock()
}
func (mem *CListMempool) Unlock() {
mem.proxyMtx.Unlock()
}
func (mem *CListMempool) Size() int {
return mem.txs.Len()
}
func (mem *CListMempool) TxsBytes() int64 {
return atomic.LoadInt64(&mem.txsBytes)
}
func (mem *CListMempool) FlushAppConn() error {
return mem.proxyAppConn.FlushSync()
}
func (mem *CListMempool) Flush() {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
mem.cache.Reset()
for e := mem.txs.Front(); e != nil; e = e.Next() {
mem.txs.Remove(e)
e.DetachPrev()
}
mem.txsMap = sync.Map{}
_ = atomic.SwapInt64(&mem.txsBytes, 0)
}
// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
func (mem *CListMempool) TxsFront() *clist.CElement {
return mem.txs.Front()
}
// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element)
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
return mem.txs.WaitChan()
}
// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
return mem.CheckTxWithInfo(tx, cb, TxInfo{SenderID: UnknownPeerID})
}
func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) {
mem.proxyMtx.Lock()
// use defer to unlock mutex because application (*local client*) might panic
defer mem.proxyMtx.Unlock()
var (
memSize = mem.Size()
txsBytes = mem.TxsBytes()
)
if memSize >= mem.config.Size ||
int64(len(tx))+txsBytes > mem.config.MaxTxsBytes {
return ErrMempoolIsFull{
memSize, mem.config.Size,
txsBytes, mem.config.MaxTxsBytes}
}
// The size of the corresponding amino-encoded TxMessage
// can't be larger than the maxMsgSize, otherwise we can't
// relay it to peers.
if len(tx) > maxTxSize {
return ErrTxTooLarge
}
if mem.preCheck != nil {
if err := mem.preCheck(tx); err != nil {
return ErrPreCheck{err}
}
}
// CACHE
if !mem.cache.Push(tx) {
// Record a new sender for a tx we've already seen.
// Note it's possible a tx is still in the cache but no longer in the mempool
// (eg. after committing a block, txs are removed from mempool but not cache),
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(txKey(tx)); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
if _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true); loaded {
// TODO: consider punishing peer for dups,
// it's non-trivial since invalid txs can become valid,
// but they can spam the same tx with little cost to them atm.
}
}
return ErrTxInCache
}
// END CACHE
// WAL
if mem.wal != nil {
// TODO: Notify administrators when WAL fails
_, err := mem.wal.Write([]byte(tx))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
_, err = mem.wal.Write([]byte("\n"))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
}
// END WAL
// NOTE: proxyAppConn may error if tx buffer is full
if err = mem.proxyAppConn.Error(); err != nil {
return err
}
reqRes := mem.proxyAppConn.CheckTxAsync(tx)
reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, cb))
return nil
}
// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who),
// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
// include this information. If we're not in the midst of a recheck, this function will just return,
// so the request specific callback can do the work.
// When rechecking, we don't need the peerID, so the recheck callback happens here.
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
if mem.recheckCursor == nil {
return
}
mem.metrics.RecheckTimes.Add(1)
mem.resCbRecheck(req, res)
// update metrics
mem.metrics.Size.Set(float64(mem.Size()))
}
// Request specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTxWithInfo to record PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) {
return func(res *abci.Response) {
if mem.recheckCursor != nil {
// this should never happen
panic("recheck cursor is not nil in reqResCb")
}
mem.resCbFirstTime(tx, peerID, res)
// update metrics
mem.metrics.Size.Set(float64(mem.Size()))
// passed in by the caller of CheckTx, eg. the RPC
if externalCb != nil {
externalCb(res)
}
}
}
// Called from:
// - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
e := mem.txs.PushBack(memTx)
mem.txsMap.Store(txKey(memTx.tx), e)
atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}
// Called from:
// - Update (lock held) if tx was committed
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(txKey(tx))
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
if removeFromCache {
mem.cache.Remove(tx)
}
}
// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_CheckTx:
var postCheckErr error
if mem.postCheck != nil {
postCheckErr = mem.postCheck(tx, r.CheckTx)
}
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
memTx := &mempoolTx{
height: mem.height,
gasWanted: r.CheckTx.GasWanted,
tx: tx,
}
memTx.senders.Store(peerID, true)
mem.addTx(memTx)
mem.logger.Info("Added good transaction",
"tx", txID(tx),
"res", r,
"height", memTx.height,
"total", mem.Size(),
)
mem.notifyTxsAvailable()
} else {
// ignore bad transaction
mem.logger.Info("Rejected bad transaction", "tx", txID(tx), "res", r, "err", postCheckErr)
mem.metrics.FailedTxs.Add(1)
// remove from cache (it might be good later)
mem.cache.Remove(tx)
}
default:
// ignore other messages
}
}
// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_CheckTx:
tx := req.GetCheckTx().Tx
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(tx, memTx.tx) {
panic(fmt.Sprintf(
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
memTx.tx,
tx))
}
var postCheckErr error
if mem.postCheck != nil {
postCheckErr = mem.postCheck(tx, r.CheckTx)
}
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
// NOTE: we remove tx from the cache because it might be good later
mem.removeTx(tx, mem.recheckCursor, true)
}
if mem.recheckCursor == mem.recheckEnd {
mem.recheckCursor = nil
} else {
mem.recheckCursor = mem.recheckCursor.Next()
}
if mem.recheckCursor == nil {
// Done!
atomic.StoreInt32(&mem.rechecking, 0)
mem.logger.Info("Done rechecking txs")
// in case the recheck removed all txs
if mem.Size() > 0 {
mem.notifyTxsAvailable()
}
}
default:
// ignore other messages
}
}
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
return mem.txsAvailable
}
func (mem *CListMempool) notifyTxsAvailable() {
if mem.Size() == 0 {
panic("notified txs available but mempool is empty!")
}
if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
// channel cap is 1, so this will send once
mem.notifiedTxsAvailable = true
select {
case mem.txsAvailable <- struct{}{}:
default:
}
}
}
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
for atomic.LoadInt32(&mem.rechecking) > 0 {
// TODO: Something better?
time.Sleep(time.Millisecond * 10)
}
var totalBytes int64
var totalGas int64
// TODO: we will get a performance boost if we have a good estimate of avg
// size per tx, and set the initial capacity based off of that.
// txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
txs := make([]types.Tx, 0, mem.txs.Len())
for e := mem.txs.Front(); e != nil; e = e.Next() {
memTx := e.Value.(*mempoolTx)
// Check total size requirement
aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
return txs
}
totalBytes += int64(len(memTx.tx)) + aminoOverhead
// Check total gas requirement.
// If maxGas is negative, skip this check.
// Since newTotalGas < maxGas, which
// must be non-negative, it follows that this won't overflow.
newTotalGas := totalGas + memTx.gasWanted
if maxGas > -1 && newTotalGas > maxGas {
return txs
}
totalGas = newTotalGas
txs = append(txs, memTx.tx)
}
return txs
}
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
if max < 0 {
max = mem.txs.Len()
}
for atomic.LoadInt32(&mem.rechecking) > 0 {
// TODO: Something better?
time.Sleep(time.Millisecond * 10)
}
txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
memTx := e.Value.(*mempoolTx)
txs = append(txs, memTx.tx)
}
return txs
}
func (mem *CListMempool) Update(
height int64,
txs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
preCheck PreCheckFunc,
postCheck PostCheckFunc,
) error {
// Set height
mem.height = height
mem.notifiedTxsAvailable = false
if preCheck != nil {
mem.preCheck = preCheck
}
if postCheck != nil {
mem.postCheck = postCheck
}
for i, tx := range txs {
if deliverTxResponses[i].Code == abci.CodeTypeOK {
// Add valid committed tx to the cache (if missing).
_ = mem.cache.Push(tx)
// Remove valid committed tx from the mempool.
if e, ok := mem.txsMap.Load(txKey(tx)); ok {
mem.removeTx(tx, e.(*clist.CElement), false)
}
} else {
// Allow invalid transactions to be resubmitted.
mem.cache.Remove(tx)
// Don't remove invalid tx from the mempool.
// Otherwise evil proposer can drop valid txs.
// Example:
// 100 -> 101 -> 102
// Block, proposed by evil proposer:
// 101 -> 102
// Mempool (if you remove txs):
// 100
// https://github.com/tendermint/tendermint/issues/3322.
}
}
// Either recheck non-committed txs to see if they became invalid
// or just notify that there are some txs left.
if mem.Size() > 0 {
if mem.config.Recheck {
mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
mem.recheckTxs()
// At this point, mem.txs are being rechecked.
// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
} else {
mem.notifyTxsAvailable()
}
}
// Update metrics
mem.metrics.Size.Set(float64(mem.Size()))
return nil
}
func (mem *CListMempool) recheckTxs() {
if mem.Size() == 0 {
panic("recheckTxs is called, but the mempool is empty")
}
atomic.StoreInt32(&mem.rechecking, 1)
mem.recheckCursor = mem.txs.Front()
mem.recheckEnd = mem.txs.Back()
// Push txs to proxyAppConn
// NOTE: globalCb may be called concurrently.
for e := mem.txs.Front(); e != nil; e = e.Next() {
memTx := e.Value.(*mempoolTx)
mem.proxyAppConn.CheckTxAsync(memTx.tx)
}
mem.proxyAppConn.FlushAsync()
}
//--------------------------------------------------------------------------------
// mempoolTx is a transaction that successfully ran
type mempoolTx struct {
height int64 // height that this tx had been validated in
gasWanted int64 // amount of gas this tx states it will require
tx types.Tx //
// ids of peers who've sent us this tx (as a map for quick lookups).
// senders: PeerID -> bool
senders sync.Map
}
// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
return atomic.LoadInt64(&memTx.height)
}
//--------------------------------------------------------------------------------
type txCache interface {
Reset()
Push(tx types.Tx) bool
Remove(tx types.Tx)
}
// mapTxCache maintains an LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
mtx sync.Mutex
size int
map_ map[[sha256.Size]byte]*list.Element
list *list.List
}
var _ txCache = (*mapTxCache)(nil)
// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
return &mapTxCache{
size: cacheSize,
map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
list: list.New(),
}
}
// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
cache.mtx.Lock()
cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
cache.mtx.Lock()
defer cache.mtx.Unlock()
// Use the tx hash in the cache
txHash := txKey(tx)
if moved, exists := cache.map_[txHash]; exists {
cache.list.MoveToBack(moved)
return false
}
if cache.list.Len() >= cache.size {
popped := cache.list.Front()
// check the list is non-empty before dereferencing the evicted element
if popped != nil {
poppedTxHash := popped.Value.([sha256.Size]byte)
delete(cache.map_, poppedTxHash)
cache.list.Remove(popped)
}
}
e := cache.list.PushBack(txHash)
cache.map_[txHash] = e
return true
}
// Remove removes the given tx from the cache.
func (cache *mapTxCache) Remove(tx types.Tx) {
cache.mtx.Lock()
txHash := txKey(tx)
popped := cache.map_[txHash]
delete(cache.map_, txHash)
if popped != nil {
cache.list.Remove(popped)
}
cache.mtx.Unlock()
}
type nopTxCache struct{}
var _ txCache = (*nopTxCache)(nil)
func (nopTxCache) Reset() {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx) {}
//--------------------------------------------------------------------------------
// txKey is the fixed length array sha256 hash used as the key in maps.
func txKey(tx types.Tx) [sha256.Size]byte {
return sha256.Sum256(tx)
}
// txID is the hex encoded hash of the bytes as a types.Tx.
func txID(tx []byte) string {
return fmt.Sprintf("%X", types.Tx(tx).Hash())
}
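A small sketch of the CheckTx contract documented above: either the callback fires with the ABCI response, or an error is returned; the surrounding function and names are placeholders.
package example

import (
    "fmt"

    abci "github.com/tendermint/tendermint/abci/types"
    mempl "github.com/tendermint/tendermint/mempool"
    "github.com/tendermint/tendermint/types"
)

// submitTx forwards a tx to the mempool and logs the CheckTx result when the
// callback eventually fires.
func submitTx(mem mempl.Mempool, tx types.Tx) error {
    return mem.CheckTx(tx, func(res *abci.Response) {
        if r, ok := res.Value.(*abci.Response_CheckTx); ok {
            fmt.Println("CheckTx code:", r.CheckTx.Code)
        }
    })
}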

mempool/mempool_test.go → mempool/clist_mempool_test.go View File


+ 24
- 0
mempool/doc.go View File

@ -0,0 +1,24 @@
// The mempool pushes new txs onto the proxyAppConn.
// It gets a stream of (req, res) tuples from the proxy.
// The mempool stores good txs in a concurrent linked-list.
// Multiple concurrent go-routines can traverse this linked-list
// safely by calling .NextWait() on each element.
// So we have several go-routines:
// 1. Consensus calling Update() and Reap() synchronously
// 2. Many mempool reactor's peer routines calling CheckTx()
// 3. Many mempool reactor's peer routines traversing the txs linked list
// 4. Another goroutine calling GarbageCollectTxs() periodically
// To manage these goroutines, there are three methods of locking.
// 1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe)
// 2. Mutations to the linked-list elements are atomic
// 3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
// Garbage collection of old elements from mempool.txs is handled via
// the DetachPrev() call, which makes old elements unreachable by the
// peer broadcastTxRoutine(), so they are automatically garbage collected.
// TODO: Better handle abci client errors. (make it automatically handle connection errors)
package mempool
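A sketch of the traversal pattern this doc describes, assuming clist's Front/NextWait semantics (block until a successor exists or the element is removed); it is not the reactor code itself.
package example

import (
    "github.com/tendermint/tendermint/libs/clist"
)

// drain follows a CList the way the package doc describes peer routines
// doing: Front() for the head, then NextWait() for each successor. The real
// reactor also watches quit channels and re-checks Front() after removals.
func drain(l *clist.CList, handle func(interface{})) {
    for e := l.Front(); e != nil; e = e.NextWait() {
        handle(e.Value)
    }
}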

+ 46
- 0
mempool/errors.go View File

@ -0,0 +1,46 @@
package mempool
import (
"fmt"
"github.com/pkg/errors"
)
var (
// ErrTxInCache is returned to the client if we saw tx earlier
ErrTxInCache = errors.New("Tx already exists in cache")
// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize)
)
// ErrMempoolIsFull means Tendermint & an application can't handle that much load
type ErrMempoolIsFull struct {
numTxs int
maxTxs int
txsBytes int64
maxTxsBytes int64
}
func (e ErrMempoolIsFull) Error() string {
return fmt.Sprintf(
"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
e.numTxs, e.maxTxs,
e.txsBytes, e.maxTxsBytes)
}
// ErrPreCheck is returned when a tx fails the pre-check filter (e.g. it is too big)
type ErrPreCheck struct {
Reason error
}
func (e ErrPreCheck) Error() string {
return e.Reason.Error()
}
// IsPreCheckError returns true if err is due to pre check failure.
func IsPreCheckError(err error) bool {
_, ok := err.(ErrPreCheck)
return ok
}
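A sketch of how a caller might distinguish the error values defined above when CheckTx rejects a transaction:
package example

import (
    abci "github.com/tendermint/tendermint/abci/types"
    mempl "github.com/tendermint/tendermint/mempool"
    "github.com/tendermint/tendermint/types"
)

// classify maps the mempool error taxonomy onto caller-facing outcomes.
func classify(mem mempl.Mempool, tx types.Tx) string {
    err := mem.CheckTx(tx, func(*abci.Response) {})
    switch {
    case err == nil:
        return "accepted (callback will fire)"
    case err == mempl.ErrTxInCache:
        return "duplicate tx, already seen"
    case mempl.IsPreCheckError(err):
        return "rejected by pre-check"
    default:
        if _, ok := err.(mempl.ErrMempoolIsFull); ok {
            return "mempool full, retry later"
        }
        return err.Error()
    }
}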

+ 69
- 793
mempool/mempool.go View File

@ -1,111 +1,102 @@
package mempool
import (
"bytes"
"container/list"
"crypto/sha256"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
auto "github.com/tendermint/tendermint/libs/autofile"
"github.com/tendermint/tendermint/libs/clist"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
// PreCheckFunc is an optional filter executed before CheckTx and rejects
// transaction if false is returned. An example would be to ensure that a
// transaction doesn't exceed the block size.
type PreCheckFunc func(types.Tx) error
// Mempool defines the mempool interface.
//
// Updates to the mempool need to be synchronized with committing a block so
// apps can reset their transient state on Commit.
type Mempool interface {
// CheckTx executes a new transaction against the application to determine
// its validity and whether it should be added to the mempool.
CheckTx(tx types.Tx, callback func(*abci.Response)) error
// PostCheckFunc is an optional filter executed after CheckTx and rejects
// transaction if false is returned. An example would be to ensure a
// transaction doesn't require more gas than available for the block.
type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
// CheckTxWithInfo performs the same operation as CheckTx, but with extra
// meta data about the tx.
// Currently this metadata is the peer who sent it, used to prevent the tx
// from being gossiped back to them.
CheckTxWithInfo(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error
// TxInfo are parameters that get passed when attempting to add a tx to the
// mempool.
type TxInfo struct {
// We don't use p2p.ID here because it's too big. The gain is to store max 2
// bytes with each tx to identify the sender rather than 20 bytes.
PeerID uint16
}
// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
// bytes total with the condition that the total gasWanted must be less than
// maxGas.
// If both maxes are negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
/*
// ReapMaxTxs reaps up to max transactions from the mempool.
// If max is negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
ReapMaxTxs(max int) types.Txs
The mempool pushes new txs onto the proxyAppConn.
It gets a stream of (req, res) tuples from the proxy.
The mempool stores good txs in a concurrent linked-list.
// Lock locks the mempool. The consensus must be able to hold lock to safely update.
Lock()
Multiple concurrent go-routines can traverse this linked-list
safely by calling .NextWait() on each element.
// Unlock unlocks the mempool.
Unlock()
So we have several go-routines:
1. Consensus calling Update() and Reap() synchronously
2. Many mempool reactor's peer routines calling CheckTx()
3. Many mempool reactor's peer routines traversing the txs linked list
4. Another goroutine calling GarbageCollectTxs() periodically
// Update informs the mempool that the given txs were committed and can be discarded.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, newPreFn PreCheckFunc, newPostFn PostCheckFunc) error
To manage these goroutines, there are three methods of locking.
1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe)
2. Mutations to the linked-list elements are atomic
3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
// done. E.g. from CheckTx.
FlushAppConn() error
Garbage collection of old elements from mempool.txs is handled via
the DetachPrev() call, which makes old elements unreachable by the
peer broadcastTxRoutine(), so they are automatically garbage collected.
// Flush removes all transactions from the mempool and cache
Flush()
TODO: Better handle abci client errors. (make it automatically handle connection errors)
// TxsAvailable returns a channel which fires once for every height,
// and only when transactions are available in the mempool.
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
TxsAvailable() <-chan struct{}
*/
// EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
// trigger once every height when transactions are available.
EnableTxsAvailable()
var (
// ErrTxInCache is returned to the client if we saw tx earlier
ErrTxInCache = errors.New("Tx already exists in cache")
// Size returns the number of transactions in the mempool.
Size() int
// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize)
)
// TxsBytes returns the total size of all txs in the mempool.
TxsBytes() int64
// ErrMempoolIsFull means Tendermint & an application can't handle that much load
type ErrMempoolIsFull struct {
numTxs int
maxTxs int
// InitWAL creates a directory for the WAL file and opens a file itself.
InitWAL()
txsBytes int64
maxTxsBytes int64
// CloseWAL closes and discards the underlying WAL file.
// Any further writes will not be relayed to disk.
CloseWAL()
}
func (e ErrMempoolIsFull) Error() string {
return fmt.Sprintf(
"Mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
e.numTxs, e.maxTxs,
e.txsBytes, e.maxTxsBytes)
}
//--------------------------------------------------------------------------------
// ErrPreCheck is returned when tx is too big
type ErrPreCheck struct {
Reason error
}
// PreCheckFunc is an optional filter executed before CheckTx and rejects
// transaction if false is returned. An example would be to ensure that a
// transaction doesn't exceed the block size.
type PreCheckFunc func(types.Tx) error
func (e ErrPreCheck) Error() string {
return e.Reason.Error()
}
// PostCheckFunc is an optional filter executed after CheckTx and rejects
// transaction if false is returned. An example would be to ensure a
// transaction doesn't require more gas than available for the block.
type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
// IsPreCheckError returns true if err is due to pre check failure.
func IsPreCheckError(err error) bool {
_, ok := err.(ErrPreCheck)
return ok
// TxInfo are parameters that get passed when attempting to add a tx to the
// mempool.
type TxInfo struct {
// We don't use p2p.ID here because it's too big. The gain is to store max 2
// bytes with each tx to identify the sender rather than 20 bytes.
SenderID uint16
}
//--------------------------------------------------------------------------------
// PreCheckAminoMaxBytes checks that the size of the transaction plus the amino
// overhead is smaller or equal to the expected maxBytes.
func PreCheckAminoMaxBytes(maxBytes int64) PreCheckFunc {
@ -143,718 +134,3 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc {
return nil
}
}
// TxID is the hex encoded hash of the bytes as a types.Tx.
func TxID(tx []byte) string {
return fmt.Sprintf("%X", types.Tx(tx).Hash())
}
// txKey is the fixed length array sha256 hash used as the key in maps.
func txKey(tx types.Tx) [sha256.Size]byte {
return sha256.Sum256(tx)
}
// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
// round. Transaction validity is checked using the CheckTx abci message before the transaction is
// added to the pool. The Mempool uses a concurrent list structure for storing transactions that
// can be efficiently accessed by multiple concurrent readers.
type Mempool struct {
config *cfg.MempoolConfig
proxyMtx sync.Mutex
proxyAppConn proxy.AppConnMempool
txs *clist.CList // concurrent linked-list of good txs
preCheck PreCheckFunc
postCheck PostCheckFunc
// Track whether we're rechecking txs.
// These are not protected by a mutex and are expected to be mutated
// in serial (ie. by abci responses which are called in serial).
recheckCursor *clist.CElement // next expected response
recheckEnd *clist.CElement // re-checking stops here
// notify listeners (ie. consensus) when txs are available
notifiedTxsAvailable bool
txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
// Map for quick access to txs to record sender in CheckTx.
// txsMap: txKey -> CElement
txsMap sync.Map
// Atomic integers
height int64 // the last block Update()'d to
rechecking int32 // for re-checking filtered txs on Update()
txsBytes int64 // total size of mempool, in bytes
// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
cache txCache
// A log of mempool txs
wal *auto.AutoFile
logger log.Logger
metrics *Metrics
}
// MempoolOption sets an optional parameter on the Mempool.
type MempoolOption func(*Mempool)
// NewMempool returns a new Mempool with the given configuration and connection to an application.
func NewMempool(
config *cfg.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
options ...MempoolOption,
) *Mempool {
mempool := &Mempool{
config: config,
proxyAppConn: proxyAppConn,
txs: clist.New(),
height: height,
rechecking: 0,
recheckCursor: nil,
recheckEnd: nil,
logger: log.NewNopLogger(),
metrics: NopMetrics(),
}
if config.CacheSize > 0 {
mempool.cache = newMapTxCache(config.CacheSize)
} else {
mempool.cache = nopTxCache{}
}
proxyAppConn.SetResponseCallback(mempool.globalCb)
for _, option := range options {
option(mempool)
}
return mempool
}
// EnableTxsAvailable initializes the TxsAvailable channel,
// ensuring it will trigger once every height when transactions are available.
// NOTE: not thread safe - should only be called once, on startup
func (mem *Mempool) EnableTxsAvailable() {
mem.txsAvailable = make(chan struct{}, 1)
}
// SetLogger sets the Logger.
func (mem *Mempool) SetLogger(l log.Logger) {
mem.logger = l
}
// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx.
func WithPreCheck(f PreCheckFunc) MempoolOption {
return func(mem *Mempool) { mem.preCheck = f }
}
// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx.
func WithPostCheck(f PostCheckFunc) MempoolOption {
return func(mem *Mempool) { mem.postCheck = f }
}
// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) MempoolOption {
return func(mem *Mempool) { mem.metrics = metrics }
}
// InitWAL creates a directory for the WAL file and opens a file itself.
//
// *panics* if can't create directory or open file.
// *not thread safe*
func (mem *Mempool) InitWAL() {
walDir := mem.config.WalDir()
err := cmn.EnsureDir(walDir, 0700)
if err != nil {
panic(errors.Wrap(err, "Error ensuring Mempool WAL dir"))
}
af, err := auto.OpenAutoFile(walDir + "/wal")
if err != nil {
panic(errors.Wrap(err, "Error opening Mempool WAL file"))
}
mem.wal = af
}
// CloseWAL closes and discards the underlying WAL file.
// Any further writes will not be relayed to disk.
func (mem *Mempool) CloseWAL() {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
if err := mem.wal.Close(); err != nil {
mem.logger.Error("Error closing WAL", "err", err)
}
mem.wal = nil
}
// Lock locks the mempool. The consensus must be able to hold lock to safely update.
func (mem *Mempool) Lock() {
mem.proxyMtx.Lock()
}
// Unlock unlocks the mempool.
func (mem *Mempool) Unlock() {
mem.proxyMtx.Unlock()
}
// Size returns the number of transactions in the mempool.
func (mem *Mempool) Size() int {
return mem.txs.Len()
}
// TxsBytes returns the total size of all txs in the mempool.
func (mem *Mempool) TxsBytes() int64 {
return atomic.LoadInt64(&mem.txsBytes)
}
// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
// done. E.g. from CheckTx.
func (mem *Mempool) FlushAppConn() error {
return mem.proxyAppConn.FlushSync()
}
// Flush removes all transactions from the mempool and cache
func (mem *Mempool) Flush() {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
mem.cache.Reset()
for e := mem.txs.Front(); e != nil; e = e.Next() {
mem.txs.Remove(e)
e.DetachPrev()
}
mem.txsMap = sync.Map{}
_ = atomic.SwapInt64(&mem.txsBytes, 0)
}
// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
func (mem *Mempool) TxsFront() *clist.CElement {
return mem.txs.Front()
}
// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element)
func (mem *Mempool) TxsWaitChan() <-chan struct{} {
return mem.txs.WaitChan()
}
// CheckTx executes a new transaction against the application to determine its validity
// and whether it should be added to the mempool.
// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
return mem.CheckTxWithInfo(tx, cb, TxInfo{PeerID: UnknownPeerID})
}
// CheckTxWithInfo performs the same operation as CheckTx, but with extra meta data about the tx.
// Currently this metadata is the peer who sent it,
// used to prevent the tx from being gossiped back to them.
func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) {
mem.proxyMtx.Lock()
// use defer to unlock mutex because application (*local client*) might panic
defer mem.proxyMtx.Unlock()
var (
memSize = mem.Size()
txsBytes = mem.TxsBytes()
)
if memSize >= mem.config.Size ||
int64(len(tx))+txsBytes > mem.config.MaxTxsBytes {
return ErrMempoolIsFull{
memSize, mem.config.Size,
txsBytes, mem.config.MaxTxsBytes}
}
// The size of the corresponding amino-encoded TxMessage
// can't be larger than the maxMsgSize, otherwise we can't
// relay it to peers.
if len(tx) > maxTxSize {
return ErrTxTooLarge
}
if mem.preCheck != nil {
if err := mem.preCheck(tx); err != nil {
return ErrPreCheck{err}
}
}
// CACHE
if !mem.cache.Push(tx) {
// Record a new sender for a tx we've already seen.
// Note it's possible a tx is still in the cache but no longer in the mempool
// (eg. after committing a block, txs are removed from mempool but not cache),
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(txKey(tx)); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded {
// TODO: consider punishing peer for dups,
// it's non-trivial since invalid txs can become valid,
// but they can spam the same tx with little cost to them atm.
}
}
return ErrTxInCache
}
// END CACHE
// WAL
if mem.wal != nil {
// TODO: Notify administrators when WAL fails
_, err := mem.wal.Write([]byte(tx))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
_, err = mem.wal.Write([]byte("\n"))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
}
// END WAL
// NOTE: proxyAppConn may error if tx buffer is full
if err = mem.proxyAppConn.Error(); err != nil {
return err
}
reqRes := mem.proxyAppConn.CheckTxAsync(tx)
reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID, cb))
return nil
}
// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who),
// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
// include this information. If we're not in the midst of a recheck, this function will just return,
// so the request specific callback can do the work.
// When rechecking, we don't need the peerID, so the recheck callback happens here.
func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) {
if mem.recheckCursor == nil {
return
}
mem.metrics.RecheckTimes.Add(1)
mem.resCbRecheck(req, res)
// update metrics
mem.metrics.Size.Set(float64(mem.Size()))
}
// Request-specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTxWithInfo to record the PeerID of the peer who sent us the tx.
func (mem *Mempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) {
return func(res *abci.Response) {
if mem.recheckCursor != nil {
// this should never happen
panic("recheck cursor is not nil in reqResCb")
}
mem.resCbFirstTime(tx, peerID, res)
// update metrics
mem.metrics.Size.Set(float64(mem.Size()))
// passed in by the caller of CheckTx, eg. the RPC
if externalCb != nil {
externalCb(res)
}
}
}
// Called from:
// - resCbFirstTime (lock not held) if tx is valid
func (mem *Mempool) addTx(memTx *mempoolTx) {
e := mem.txs.PushBack(memTx)
mem.txsMap.Store(txKey(memTx.tx), e)
atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}
// Called from:
// - Update (lock held) if tx was committed
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(txKey(tx))
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
if removeFromCache {
mem.cache.Remove(tx)
}
}
// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *Mempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_CheckTx:
var postCheckErr error
if mem.postCheck != nil {
postCheckErr = mem.postCheck(tx, r.CheckTx)
}
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
memTx := &mempoolTx{
height: mem.height,
gasWanted: r.CheckTx.GasWanted,
tx: tx,
}
memTx.senders.Store(peerID, true)
mem.addTx(memTx)
mem.logger.Info("Added good transaction",
"tx", TxID(tx),
"res", r,
"height", memTx.height,
"total", mem.Size(),
)
mem.notifyTxsAvailable()
} else {
// ignore bad transaction
mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r, "err", postCheckErr)
mem.metrics.FailedTxs.Add(1)
// remove from cache (it might be good later)
mem.cache.Remove(tx)
}
default:
// ignore other messages
}
}
// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_CheckTx:
tx := req.GetCheckTx().Tx
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(tx, memTx.tx) {
panic(fmt.Sprintf(
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
memTx.tx,
tx))
}
var postCheckErr error
if mem.postCheck != nil {
postCheckErr = mem.postCheck(tx, r.CheckTx)
}
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr)
// NOTE: we remove tx from the cache because it might be good later
mem.removeTx(tx, mem.recheckCursor, true)
}
if mem.recheckCursor == mem.recheckEnd {
mem.recheckCursor = nil
} else {
mem.recheckCursor = mem.recheckCursor.Next()
}
if mem.recheckCursor == nil {
// Done!
atomic.StoreInt32(&mem.rechecking, 0)
mem.logger.Info("Done rechecking txs")
// in case the recheck removed all txs
if mem.Size() > 0 {
mem.notifyTxsAvailable()
}
}
default:
// ignore other messages
}
}
// TxsAvailable returns a channel which fires once for every height,
// and only when transactions are available in the mempool.
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
func (mem *Mempool) TxsAvailable() <-chan struct{} {
return mem.txsAvailable
}
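// Illustrative sketch (not part of this change): consuming the TxsAvailable
// signal. Assumes EnableTxsAvailable was called beforehand (as node.go does
// when consensus is configured to wait for txs), so the channel is non-nil.
func exampleWaitForTxs(mem *Mempool) {
	ch := mem.TxsAvailable()
	if ch == nil {
		return // EnableTxsAvailable was never called
	}
	<-ch // fires once per height, only when the mempool is non-empty
	// a proposer would now reap txs, e.g. via ReapMaxBytesMaxGas below
}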
func (mem *Mempool) notifyTxsAvailable() {
if mem.Size() == 0 {
panic("notified txs available but mempool is empty!")
}
if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
// channel cap is 1, so this will send once
mem.notifiedTxsAvailable = true
select {
case mem.txsAvailable <- struct{}{}:
default:
}
}
}
// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes bytes total
// with the condition that the total gasWanted must be less than maxGas.
// If both maxes are negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
func (mem *Mempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
for atomic.LoadInt32(&mem.rechecking) > 0 {
// TODO: Something better?
time.Sleep(time.Millisecond * 10)
}
var totalBytes int64
var totalGas int64
// TODO: we will get a performance boost if we have a good estimate of avg
// size per tx, and set the initial capacity based off of that.
// txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
txs := make([]types.Tx, 0, mem.txs.Len())
for e := mem.txs.Front(); e != nil; e = e.Next() {
memTx := e.Value.(*mempoolTx)
// Check total size requirement
aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
return txs
}
totalBytes += int64(len(memTx.tx)) + aminoOverhead
// Check total gas requirement.
// If maxGas is negative, skip this check.
// Since newTotalGas < maxGas, which
// must be non-negative, it follows that this won't overflow.
newTotalGas := totalGas + memTx.gasWanted
if maxGas > -1 && newTotalGas > maxGas {
return txs
}
totalGas = newTotalGas
txs = append(txs, memTx.tx)
}
return txs
}
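// Illustrative sketch (not part of this change): reaping txs for a proposal
// with both caps enabled. The limits (1 MB of txs, 10M gas) are example
// values; passing -1 for either argument disables that cap, per the comment above.
func exampleReap(mem *Mempool) types.Txs {
	return mem.ReapMaxBytesMaxGas(1024*1024, 10000000)
}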
// ReapMaxTxs reaps up to max transactions from the mempool.
// If max is negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
func (mem *Mempool) ReapMaxTxs(max int) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
if max < 0 {
max = mem.txs.Len()
}
for atomic.LoadInt32(&mem.rechecking) > 0 {
// TODO: Something better?
time.Sleep(time.Millisecond * 10)
}
txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
memTx := e.Value.(*mempoolTx)
txs = append(txs, memTx.tx)
}
return txs
}
// Update informs the mempool that the given txs were committed and can be discarded.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
func (mem *Mempool) Update(
height int64,
txs types.Txs,
preCheck PreCheckFunc,
postCheck PostCheckFunc,
) error {
// Set height
mem.height = height
mem.notifiedTxsAvailable = false
if preCheck != nil {
mem.preCheck = preCheck
}
if postCheck != nil {
mem.postCheck = postCheck
}
// Add committed transactions to cache (if missing).
for _, tx := range txs {
_ = mem.cache.Push(tx)
}
// Remove committed transactions.
txsLeft := mem.removeTxs(txs)
// Either recheck non-committed txs to see if they became invalid
// or just notify that there are some txs left.
if len(txsLeft) > 0 {
if mem.config.Recheck {
mem.logger.Info("Recheck txs", "numtxs", len(txsLeft), "height", height)
mem.recheckTxs(txsLeft)
// At this point, mem.txs are being rechecked.
// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
} else {
mem.notifyTxsAvailable()
}
}
// Update metrics
mem.metrics.Size.Set(float64(mem.Size()))
return nil
}
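// Illustrative sketch (not part of this change): the caller-managed locking
// protocol around Update after a block commit. Lock/Unlock are assumed to be
// the mempool's own mutex methods (they are part of the Mempool interface used
// by mock/mempool.go below); nil pre/post-check funcs keep the existing ones.
func exampleUpdateAfterCommit(mem *Mempool, height int64, committedTxs types.Txs) error {
	mem.Lock()
	defer mem.Unlock()
	return mem.Update(height, committedTxs, nil, nil)
}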
func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx {
// Build a map for faster lookups.
txsMap := make(map[string]struct{}, len(txs))
for _, tx := range txs {
txsMap[string(tx)] = struct{}{}
}
txsLeft := make([]types.Tx, 0, mem.txs.Len())
for e := mem.txs.Front(); e != nil; e = e.Next() {
memTx := e.Value.(*mempoolTx)
// Remove the tx if it's already in a block.
if _, ok := txsMap[string(memTx.tx)]; ok {
// NOTE: we don't remove committed txs from the cache.
mem.removeTx(memTx.tx, e, false)
continue
}
txsLeft = append(txsLeft, memTx.tx)
}
return txsLeft
}
// NOTE: pass in txs because mem.txs can mutate concurrently.
func (mem *Mempool) recheckTxs(txs []types.Tx) {
if len(txs) == 0 {
return
}
atomic.StoreInt32(&mem.rechecking, 1)
mem.recheckCursor = mem.txs.Front()
mem.recheckEnd = mem.txs.Back()
// Push txs to proxyAppConn
// NOTE: globalCb may be called concurrently.
for _, tx := range txs {
mem.proxyAppConn.CheckTxAsync(tx)
}
mem.proxyAppConn.FlushAsync()
}
//--------------------------------------------------------------------------------
// mempoolTx is a transaction that successfully ran CheckTx.
type mempoolTx struct {
height int64 // height at which this tx was validated
gasWanted int64 // amount of gas this tx states it will require
tx types.Tx
// ids of peers who've sent us this tx (as a map for quick lookups).
// senders: PeerID -> bool
senders sync.Map
}
// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
return atomic.LoadInt64(&memTx.height)
}
//--------------------------------------------------------------------------------
type txCache interface {
Reset()
Push(tx types.Tx) bool
Remove(tx types.Tx)
}
// mapTxCache maintains an LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
mtx sync.Mutex
size int
map_ map[[sha256.Size]byte]*list.Element
list *list.List
}
var _ txCache = (*mapTxCache)(nil)
// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
return &mapTxCache{
size: cacheSize,
map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
list: list.New(),
}
}
// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
cache.mtx.Lock()
cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
cache.mtx.Lock()
defer cache.mtx.Unlock()
// Use the tx hash in the cache
txHash := txKey(tx)
if moved, exists := cache.map_[txHash]; exists {
cache.list.MoveToBack(moved)
return false
}
if cache.list.Len() >= cache.size {
popped := cache.list.Front()
if popped != nil {
poppedTxHash := popped.Value.([sha256.Size]byte)
delete(cache.map_, poppedTxHash)
cache.list.Remove(popped)
}
}
e := cache.list.PushBack(txHash)
cache.map_[txHash] = e
return true
}
// Remove removes the given tx from the cache.
func (cache *mapTxCache) Remove(tx types.Tx) {
cache.mtx.Lock()
txHash := txKey(tx)
popped := cache.map_[txHash]
delete(cache.map_, txHash)
if popped != nil {
cache.list.Remove(popped)
}
cache.mtx.Unlock()
}
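// Illustrative sketch (not part of this change): the cache semantics exercised
// directly. Push reports whether the tx was newly cached; a duplicate returns
// false until Remove (or eviction) makes room for it again.
func exampleCacheSemantics() {
	cache := newMapTxCache(2)
	tx := types.Tx("example-tx")
	_ = cache.Push(tx) // true: newly cached
	_ = cache.Push(tx) // false: already cached
	cache.Remove(tx)
	_ = cache.Push(tx) // true again
}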
type nopTxCache struct{}
var _ txCache = (*nopTxCache)(nil)
func (nopTxCache) Reset() {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx) {}

+ 22
- 22
mempool/reactor.go View File

@ -31,13 +31,13 @@ const (
maxActiveIDs = math.MaxUint16
)
// MempoolReactor handles mempool tx broadcasting amongst peers.
// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs back to
// the peers you received them from.
type MempoolReactor struct {
type Reactor struct {
p2p.BaseReactor
config *cfg.MempoolConfig
Mempool *Mempool
mempool *CListMempool
ids *mempoolIDs
}
@ -104,25 +104,25 @@ func newMempoolIDs() *mempoolIDs {
}
}
// NewMempoolReactor returns a new MempoolReactor with the given config and mempool.
func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor {
memR := &MempoolReactor{
// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
memR := &Reactor{
config: config,
Mempool: mempool,
mempool: mempool,
ids: newMempoolIDs(),
}
memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR)
memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
return memR
}
// SetLogger sets the Logger on the reactor and the underlying Mempool.
func (memR *MempoolReactor) SetLogger(l log.Logger) {
// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
memR.Logger = l
memR.Mempool.SetLogger(l)
memR.mempool.SetLogger(l)
}
// OnStart implements p2p.BaseReactor.
func (memR *MempoolReactor) OnStart() error {
func (memR *Reactor) OnStart() error {
if !memR.config.Broadcast {
memR.Logger.Info("Tx broadcasting is disabled")
}
@ -131,7 +131,7 @@ func (memR *MempoolReactor) OnStart() error {
// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: MempoolChannel,
@ -142,20 +142,20 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *MempoolReactor) AddPeer(peer p2p.Peer) {
func (memR *Reactor) AddPeer(peer p2p.Peer) {
memR.ids.ReserveForPeer(peer)
go memR.broadcastTxRoutine(peer)
}
// RemovePeer implements Reactor.
func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
memR.ids.Reclaim(peer)
// broadcast routine checks if peer is gone and returns
}
// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@ -167,9 +167,9 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
switch msg := msg.(type) {
case *TxMessage:
peerID := memR.ids.GetForPeer(src)
err := memR.Mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{PeerID: peerID})
err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID})
if err != nil {
memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err)
memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
}
// broadcasting happens from go routines per peer
default:
@ -183,7 +183,7 @@ type PeerState interface {
}
// Send new mempool txs to peer.
func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
if !memR.config.Broadcast {
return
}
@ -200,8 +200,8 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
// start from the beginning.
if next == nil {
select {
case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available
if next = memR.Mempool.TxsFront(); next == nil {
case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
if next = memR.mempool.TxsFront(); next == nil {
continue
}
case <-peer.Quit():
@ -255,7 +255,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
//-----------------------------------------------------------------------------
// Messages
// MempoolMessage is a message sent or received by the MempoolReactor.
// MempoolMessage is a message sent or received by the Reactor.
type MempoolMessage interface{}
func RegisterMempoolMessages(cdc *amino.Codec) {


+ 23
- 24
mempool/reactor_test.go View File

@ -1,7 +1,6 @@
package mempool
import (
"fmt"
"net"
"sync"
"testing"
@ -43,8 +42,8 @@ func mempoolLogger() log.Logger {
}
// connect N mempool reactors through N switches
func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor {
reactors := make([]*MempoolReactor, N)
func makeAndConnectReactors(config *cfg.Config, N int) []*Reactor {
reactors := make([]*Reactor, N)
logger := mempoolLogger()
for i := 0; i < N; i++ {
app := kvstore.NewKVStoreApplication()
@ -52,7 +51,7 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we don't start the consensus states
reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
}
@ -64,13 +63,15 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
return reactors
}
// wait for all txs on all reactors
func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
// wait for the txs in all mempools
wg := new(sync.WaitGroup)
for i := 0; i < len(reactors); i++ {
for i, reactor := range reactors {
wg.Add(1)
go _waitForTxs(t, wg, txs, i, reactors)
go func(r *Reactor, reactorIndex int) {
defer wg.Done()
waitForTxsOnReactor(t, txs, r, reactorIndex)
}(reactor, i)
}
done := make(chan struct{})
@ -87,25 +88,23 @@ func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
}
}
// wait for all txs on a single mempool
func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) {
mempool := reactors[reactorIdx].Mempool
for mempool.Size() != len(txs) {
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
mempool := reactor.mempool
for mempool.Size() < len(txs) {
time.Sleep(time.Millisecond * 100)
}
reapedTxs := mempool.ReapMaxTxs(len(txs))
for i, tx := range txs {
assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i]))
assert.Equalf(t, tx, reapedTxs[i],
"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
}
wg.Done()
}
// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *MempoolReactor, timeout time.Duration) {
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
time.Sleep(timeout) // wait for the txs in all mempools
assert.Zero(t, reactor.Mempool.Size())
assert.Zero(t, reactor.mempool.Size())
}
const (
@ -116,7 +115,7 @@ const (
func TestReactorBroadcastTxMessage(t *testing.T) {
config := cfg.TestConfig()
const N = 4
reactors := makeAndConnectMempoolReactors(config, N)
reactors := makeAndConnectReactors(config, N)
defer func() {
for _, r := range reactors {
r.Stop()
@ -130,14 +129,14 @@ func TestReactorBroadcastTxMessage(t *testing.T) {
// send a bunch of txs to the first reactor's mempool
// and wait for them all to be received in the others
txs := checkTxs(t, reactors[0].Mempool, NUM_TXS, UnknownPeerID)
waitForTxs(t, txs, reactors)
txs := checkTxs(t, reactors[0].mempool, NUM_TXS, UnknownPeerID)
waitForTxsOnReactors(t, txs, reactors)
}
func TestReactorNoBroadcastToSender(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectMempoolReactors(config, N)
reactors := makeAndConnectReactors(config, N)
defer func() {
for _, r := range reactors {
r.Stop()
@ -146,7 +145,7 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
// send a bunch of txs to the first reactor's mempool, claiming it came from peer
// ensure peer gets no txs
checkTxs(t, reactors[0].Mempool, NUM_TXS, 1)
checkTxs(t, reactors[0].mempool, NUM_TXS, 1)
ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}
@ -157,7 +156,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectMempoolReactors(config, N)
reactors := makeAndConnectReactors(config, N)
defer func() {
for _, r := range reactors {
r.Stop()
@ -180,7 +179,7 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectMempoolReactors(config, N)
reactors := makeAndConnectReactors(config, N)
// stop reactors
for _, r := range reactors {


+ 46
- 0
mock/mempool.go View File

@ -0,0 +1,46 @@
package mock
import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/clist"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/types"
)
// Mempool is an empty implementation of a Mempool, useful for testing.
type Mempool struct{}
var _ mempl.Mempool = Mempool{}
func (Mempool) Lock() {}
func (Mempool) Unlock() {}
func (Mempool) Size() int { return 0 }
func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error {
return nil
}
func (Mempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response),
_ mempl.TxInfo) error {
return nil
}
func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
func (Mempool) Update(
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ mempl.PreCheckFunc,
_ mempl.PostCheckFunc,
) error {
return nil
}
func (Mempool) Flush() {}
func (Mempool) FlushAppConn() error { return nil }
func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
func (Mempool) EnableTxsAvailable() {}
func (Mempool) TxsBytes() int64 { return 0 }
func (Mempool) TxsFront() *clist.CElement { return nil }
func (Mempool) TxsWaitChan() <-chan struct{} { return nil }
func (Mempool) InitWAL() {}
func (Mempool) CloseWAL() {}
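// Illustrative sketch (not part of this change): in tests, the empty mock can
// stand in anywhere a mempl.Mempool is expected, for example the block executor
// wired up in node.go. The helper name below is hypothetical.
func exampleMempoolForTests() mempl.Mempool {
	return Mempool{}
}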

+ 4
- 2
networks/remote/integration.sh View File

@ -30,7 +30,6 @@ go get $REPO
cd $GOPATH/src/$REPO
## build
git checkout zach/ansible
make get_tools
make get_vendor_deps
make build
@ -84,8 +83,11 @@ ip3=$(strip $ip3)
# all the ansible commands are also directory specific
cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible
# create config dirs
tendermint testnet
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet
sleep 10


+ 319
- 180
node/node.go View File

@ -18,8 +18,10 @@ import (
amino "github.com/tendermint/go-amino"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/blockchain"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
cs "github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/evidence"
@ -96,7 +98,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) {
oldPV, err := privval.LoadOldFilePV(oldPrivVal)
if err != nil {
return nil, fmt.Errorf("Error reading OldPrivValidator from %v: %v\n", oldPrivVal, err)
return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v\n", oldPrivVal, err)
}
logger.Info("Upgrading PrivValidator file",
"old", oldPrivVal,
@ -157,11 +159,13 @@ type Node struct {
// services
eventBus *types.EventBus // pub/sub for services
stateDB dbm.DB
blockStore *bc.BlockStore // store the blockchain to disk
bcReactor *bc.BlockchainReactor // for fast-syncing
mempoolReactor *mempl.MempoolReactor // for gossipping transactions
blockStore *bc.BlockStore // store the blockchain to disk
bcReactor *bc.BlockchainReactor // for fast-syncing
mempoolReactor *mempl.Reactor // for gossipping transactions
mempool mempl.Mempool
consensusState *cs.ConsensusState // latest consensus state
consensusReactor *cs.ConsensusReactor // for participating in the consensus
pexReactor *pex.PEXReactor // for exchanging peer addresses
evidencePool *evidence.EvidencePool // tracking evidence
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
@ -170,73 +174,49 @@ type Node struct {
prometheusSrv *http.Server
}
// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
privValidator types.PrivValidator,
nodeKey *p2p.NodeKey,
clientCreator proxy.ClientCreator,
genesisDocProvider GenesisDocProvider,
dbProvider DBProvider,
metricsProvider MetricsProvider,
logger log.Logger) (*Node, error) {
// Get BlockStore
blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *bc.BlockStore, stateDB dbm.DB, err error) {
var blockStoreDB dbm.DB
blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
if err != nil {
return nil, err
return
}
blockStore := bc.NewBlockStore(blockStoreDB)
blockStore = bc.NewBlockStore(blockStoreDB)
// Get State
stateDB, err := dbProvider(&DBContext{"state", config})
stateDB, err = dbProvider(&DBContext{"state", config})
if err != nil {
return nil, err
return
}
// Get genesis doc
// TODO: move to state package?
genDoc, err := loadGenesisDoc(stateDB)
if err != nil {
genDoc, err = genesisDocProvider()
if err != nil {
return nil, err
}
// save genesis doc to prevent a certain class of user errors (e.g. when it
// was changed, accidentally or not). Also good for audit trail.
saveGenesisDoc(stateDB, genDoc)
}
state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
if err != nil {
return nil, err
}
return
}
// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
proxyApp := proxy.NewAppConns(clientCreator)
proxyApp.SetLogger(logger.With("module", "proxy"))
if err := proxyApp.Start(); err != nil {
return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
return nil, fmt.Errorf("error starting proxy app connections: %v", err)
}
return proxyApp, nil
}
// EventBus and IndexerService must be started before the handshake because
// we might need to index the txs of the replayed block as this might not have happened
// when the node stopped last time (i.e. the node stopped after it saved the block
// but before it indexed the txs, or, endblocker panicked)
func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
err = eventBus.Start()
if err != nil {
if err := eventBus.Start(); err != nil {
return nil, err
}
return eventBus, nil
}
func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.TxIndex.Indexer {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
return nil, nil, err
}
if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
@ -251,26 +231,26 @@ func NewNode(config *cfg.Config,
indexerService := txindex.NewIndexerService(txIndexer, eventBus)
indexerService.SetLogger(logger.With("module", "txindex"))
err = indexerService.Start()
if err != nil {
return nil, err
if err := indexerService.Start(); err != nil {
return nil, nil, err
}
return indexerService, txIndexer, nil
}
func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore,
genDoc *types.GenesisDoc, eventBus *types.EventBus, proxyApp proxy.AppConns, consensusLogger log.Logger) error {
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
consensusLogger := logger.With("module", "consensus")
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
handshaker.SetLogger(consensusLogger)
handshaker.SetEventBus(eventBus)
if err := handshaker.Handshake(proxyApp); err != nil {
return nil, fmt.Errorf("Error during handshake: %v", err)
return fmt.Errorf("error during handshake: %v", err)
}
return nil
}
// Reload the state. It will have the Version.Consensus.App set by the
// Handshake, and may have other modifications as well (ie. depending on
// what happened during block replay).
state = sm.LoadState(stateDB)
func logNodeStartupInfo(state sm.State, privValidator types.PrivValidator, logger,
consensusLogger log.Logger) {
// Log the version info.
logger.Info("Version info",
@ -287,27 +267,6 @@ func NewNode(config *cfg.Config,
)
}
if config.PrivValidatorListenAddr != "" {
// If an address is provided, listen on the socket for a connection from an
// external signing process.
// FIXME: we should start services inside OnStart
privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
if err != nil {
return nil, errors.Wrap(err, "Error with private validator socket client")
}
}
// Decide whether to fast-sync or not
// We don't fast-sync when the only validator is us.
fastSync := config.FastSync
if state.Validators.Size() == 1 {
addr, _ := state.Validators.GetByIndex(0)
privValAddr := privValidator.GetPubKey().Address()
if bytes.Equal(privValAddr, addr) {
fastSync = false
}
}
pubKey := privValidator.GetPubKey()
addr := pubKey.Address()
// Log whether this node is a validator or an observer
@ -316,11 +275,20 @@ func NewNode(config *cfg.Config,
} else {
consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
}
}
csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool {
if state.Validators.Size() > 1 {
return false
}
addr, _ := state.Validators.GetByIndex(0)
return bytes.Equal(privVal.GetPubKey().Address(), addr)
}
// Make MempoolReactor
mempool := mempl.NewMempool(
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
mempool := mempl.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
@ -329,44 +297,42 @@ func NewNode(config *cfg.Config,
mempl.WithPostCheck(sm.TxPostCheck(state)),
)
mempoolLogger := logger.With("module", "mempool")
mempool.SetLogger(mempoolLogger)
if config.Mempool.WalEnabled() {
mempool.InitWAL() // no need to have the mempool wal during tests
}
mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
mempoolReactor.SetLogger(mempoolLogger)
if config.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
return mempoolReactor, mempool
}
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
stateDB dbm.DB, logger log.Logger) (*evidence.EvidenceReactor, *evidence.EvidencePool, error) {
// Make Evidence Reactor
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
if err != nil {
return nil, err
return nil, nil, err
}
evidenceLogger := logger.With("module", "evidence")
evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
evidencePool.SetLogger(evidenceLogger)
evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
evidenceReactor.SetLogger(evidenceLogger)
return evidenceReactor, evidencePool, nil
}
blockExecLogger := logger.With("module", "state")
// make block executor for consensus and blockchain reactors to execute blocks
blockExec := sm.NewBlockExecutor(
stateDB,
blockExecLogger,
proxyApp.Consensus(),
mempool,
evidencePool,
sm.BlockExecutorWithMetrics(smMetrics),
)
// Make BlockchainReactor
bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
func createConsensusReactor(config *cfg.Config,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
mempool *mempl.CListMempool,
evidencePool *evidence.EvidencePool,
privValidator types.PrivValidator,
csMetrics *cs.Metrics,
fastSync bool,
eventBus *types.EventBus,
consensusLogger log.Logger) (*consensus.ConsensusReactor, *consensus.ConsensusState) {
// Make ConsensusReactor
consensusState := cs.NewConsensusState(
config.Consensus,
state.Copy(),
@ -382,28 +348,13 @@ func NewNode(config *cfg.Config,
}
consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
consensusReactor.SetLogger(consensusLogger)
// services which will be publishing and/or subscribing for messages (events)
// consensusReactor will set it on consensusState and blockExecutor
consensusReactor.SetEventBus(eventBus)
return consensusReactor, consensusState
}
p2pLogger := logger.With("module", "p2p")
nodeInfo, err := makeNodeInfo(
config,
nodeKey.ID(),
txIndexer,
genDoc.ChainID,
p2p.NewProtocolVersion(
version.P2PProtocol, // global
state.Version.Consensus.Block,
state.Version.Consensus.App,
),
)
if err != nil {
return nil, err
}
// Setup Transport.
func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
var (
mConnConfig = p2p.MConnConfig(config.P2P)
transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
@ -429,7 +380,7 @@ func NewNode(config *cfg.Config,
return err
}
if res.IsErr() {
return fmt.Errorf("Error querying abci app: %v", res)
return fmt.Errorf("error querying abci app: %v", res)
}
return nil
@ -447,7 +398,7 @@ func NewNode(config *cfg.Config,
return err
}
if res.IsErr() {
return fmt.Errorf("Error querying abci app: %v", res)
return fmt.Errorf("error querying abci app: %v", res)
}
return nil
@ -456,8 +407,21 @@ func NewNode(config *cfg.Config,
}
p2p.MultiplexTransportConnFilters(connFilters...)(transport)
return transport, peerFilters
}
func createSwitch(config *cfg.Config,
transport *p2p.MultiplexTransport,
p2pMetrics *p2p.Metrics,
peerFilters []p2p.PeerFilterFunc,
mempoolReactor *mempl.Reactor,
bcReactor *blockchain.BlockchainReactor,
consensusReactor *consensus.ConsensusReactor,
evidenceReactor *evidence.EvidenceReactor,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
p2pLogger log.Logger) *p2p.Switch {
// Setup Switch.
sw := p2p.NewSwitch(
config.P2P,
transport,
@ -473,6 +437,164 @@ func NewNode(config *cfg.Config,
sw.SetNodeKey(nodeKey)
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
return sw
}
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
p2pLogger log.Logger) pex.AddrBook {
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
// Add ourselves to addrbook to prevent dialing ourselves
addrBook.AddOurAddress(sw.NetAddress())
sw.SetAddrBook(addrBook)
return addrBook
}
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
sw *p2p.Switch, logger log.Logger) *pex.PEXReactor {
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewPEXReactor(addrBook,
&pex.PEXReactorConfig{
Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
SeedMode: config.P2P.SeedMode,
// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
// blocks assuming 10s blocks ~ 28 hours.
// TODO (melekes): make it dynamic based on the actual block latencies
// from the live network.
// https://github.com/tendermint/tendermint/issues/3523
SeedDisconnectWaitPeriod: 28 * time.Hour,
})
pexReactor.SetLogger(logger.With("module", "pex"))
sw.AddReactor("PEX", pexReactor)
return pexReactor
}
// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
privValidator types.PrivValidator,
nodeKey *p2p.NodeKey,
clientCreator proxy.ClientCreator,
genesisDocProvider GenesisDocProvider,
dbProvider DBProvider,
metricsProvider MetricsProvider,
logger log.Logger) (*Node, error) {
blockStore, stateDB, err := initDBs(config, dbProvider)
if err != nil {
return nil, err
}
state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
if err != nil {
return nil, err
}
// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
if err != nil {
return nil, err
}
// EventBus and IndexerService must be started before the handshake because
// we might need to index the txs of the replayed block as this might not have happened
// when the node stopped last time (i.e. the node stopped after it saved the block
// but before it indexed the txs, or, endblocker panicked)
eventBus, err := createAndStartEventBus(logger)
if err != nil {
return nil, err
}
// Transaction indexing
indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
if err != nil {
return nil, err
}
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
consensusLogger := logger.With("module", "consensus")
if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
return nil, err
}
// Reload the state. It will have the Version.Consensus.App set by the
// Handshake, and may have other modifications as well (ie. depending on
// what happened during block replay).
state = sm.LoadState(stateDB)
// If an address is provided, listen on the socket for a connection from an
// external signing process.
if config.PrivValidatorListenAddr != "" {
// FIXME: we should start services inside OnStart
privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
if err != nil {
return nil, errors.Wrap(err, "error with private validator socket client")
}
}
logNodeStartupInfo(state, privValidator, logger, consensusLogger)
// Decide whether to fast-sync or not
// We don't fast-sync when the only validator is us.
fastSync := config.FastSync && !onlyValidatorIsUs(state, privValidator)
csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
// Make MempoolReactor
mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
// Make Evidence Reactor
evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger)
if err != nil {
return nil, err
}
// make block executor for consensus and blockchain reactors to execute blocks
blockExec := sm.NewBlockExecutor(
stateDB,
logger.With("module", "state"),
proxyApp.Consensus(),
mempool,
evidencePool,
sm.BlockExecutorWithMetrics(smMetrics),
)
// Make BlockchainReactor
bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
// Make ConsensusReactor
consensusReactor, consensusState := createConsensusReactor(
config, state, blockExec, blockStore, mempool, evidencePool,
privValidator, csMetrics, fastSync, eventBus, consensusLogger,
)
nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
if err != nil {
return nil, err
}
// Setup Transport.
transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
// Setup Switch.
p2pLogger := logger.With("module", "p2p")
sw := createSwitch(
config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
)
err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
if err != nil {
return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
}
addrBook := createAddrBookAndSetOnSwitch(config, sw, p2pLogger)
// Optionally, start the pex reactor
//
@ -486,37 +608,13 @@ func NewNode(config *cfg.Config,
//
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
// Note we currently use the addrBook regardless at least for AddOurAddress
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
// Add ourselves to addrbook to prevent dialing ourselves
addrBook.AddOurAddress(sw.NetAddress())
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
var pexReactor *pex.PEXReactor
if config.P2P.PexReactor {
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewPEXReactor(addrBook,
&pex.PEXReactorConfig{
Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
SeedMode: config.P2P.SeedMode,
// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
// blocks assuming 10s blocks ~ 28 hours.
// TODO (melekes): make it dynamic based on the actual block latencies
// from the live network.
// https://github.com/tendermint/tendermint/issues/3523
SeedDisconnectWaitPeriod: 28 * time.Hour,
})
pexReactor.SetLogger(logger.With("module", "pex"))
sw.AddReactor("PEX", pexReactor)
pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
}
sw.SetAddrBook(addrBook)
// run the profile server
profileHost := config.ProfListenAddress
if profileHost != "" {
go func() {
logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
}()
if config.ProfListenAddress != "" {
go logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
}
node := &Node{
@ -534,8 +632,10 @@ func NewNode(config *cfg.Config,
blockStore: blockStore,
bcReactor: bcReactor,
mempoolReactor: mempoolReactor,
mempool: mempool,
consensusState: consensusState,
consensusReactor: consensusReactor,
pexReactor: pexReactor,
evidencePool: evidencePool,
proxyApp: proxyApp,
txIndexer: txIndexer,
@ -584,6 +684,10 @@ func (n *Node) OnStart() error {
n.isListening = true
if n.config.Mempool.WalEnabled() {
n.mempool.InitWAL() // no need to have the mempool wal during tests
}
// Start the switch (the P2P server).
err = n.sw.Start()
if err != nil {
@ -591,11 +695,9 @@ func (n *Node) OnStart() error {
}
// Always connect to persistent peers
if n.config.P2P.PersistentPeers != "" {
err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
if err != nil {
return err
}
err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
if err != nil {
return errors.Wrap(err, "could not dial peers from persistent_peers field")
}
return nil
@ -617,7 +719,7 @@ func (n *Node) OnStop() {
// stop mempool WAL
if n.config.Mempool.WalEnabled() {
n.mempoolReactor.Mempool.CloseWAL()
n.mempool.CloseWAL()
}
if err := n.transport.Close(); err != nil {
@ -652,7 +754,7 @@ func (n *Node) ConfigureRPC() {
rpccore.SetStateDB(n.stateDB)
rpccore.SetBlockStore(n.blockStore)
rpccore.SetConsensusState(n.consensusState)
rpccore.SetMempool(n.mempoolReactor.Mempool)
rpccore.SetMempool(n.mempool)
rpccore.SetEvidencePool(n.evidencePool)
rpccore.SetP2PPeers(n.sw)
rpccore.SetP2PTransport(n)
@ -799,11 +901,21 @@ func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
return n.consensusReactor
}
// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
return n.mempoolReactor
}
// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
return n.mempool
}
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.PEXReactor {
return n.pexReactor
}
// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
return n.evidencePool
@ -854,20 +966,24 @@ func (n *Node) NodeInfo() p2p.NodeInfo {
func makeNodeInfo(
config *cfg.Config,
nodeID p2p.ID,
nodeKey *p2p.NodeKey,
txIndexer txindex.TxIndexer,
chainID string,
protocolVersion p2p.ProtocolVersion,
genDoc *types.GenesisDoc,
state sm.State,
) (p2p.NodeInfo, error) {
txIndexerStatus := "on"
if _, ok := txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off"
}
nodeInfo := p2p.DefaultNodeInfo{
ProtocolVersion: protocolVersion,
ID_: nodeID,
Network: chainID,
Version: version.TMCoreSemVer,
ProtocolVersion: p2p.NewProtocolVersion(
version.P2PProtocol, // global
state.Version.Consensus.Block,
state.Version.Consensus.App,
),
ID_: nodeKey.ID(),
Network: genDoc.ChainID,
Version: version.TMCoreSemVer,
Channels: []byte{
bc.BlockchainChannel,
cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
@ -903,27 +1019,50 @@ var (
genesisDocKey = []byte("genesisDoc")
)
// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider and persists the
// result to the database. On success this also returns the genesis doc loaded
// through the given provider.
func LoadStateFromDBOrGenesisDocProvider(stateDB dbm.DB, genesisDocProvider GenesisDocProvider) (sm.State, *types.GenesisDoc, error) {
// Get genesis doc
genDoc, err := loadGenesisDoc(stateDB)
if err != nil {
genDoc, err = genesisDocProvider()
if err != nil {
return sm.State{}, nil, err
}
// save genesis doc to prevent a certain class of user errors (e.g. when it
// was changed, accidentally or not). Also good for audit trail.
saveGenesisDoc(stateDB, genDoc)
}
state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
if err != nil {
return sm.State{}, nil, err
}
return state, genDoc, nil
}
// panics if failed to unmarshal bytes
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
bytes := db.Get(genesisDocKey)
if len(bytes) == 0 {
b := db.Get(genesisDocKey)
if len(b) == 0 {
return nil, errors.New("Genesis doc not found")
}
var genDoc *types.GenesisDoc
err := cdc.UnmarshalJSON(bytes, &genDoc)
err := cdc.UnmarshalJSON(b, &genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
}
return genDoc, nil
}
// panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
bytes, err := cdc.MarshalJSON(genDoc)
b, err := cdc.MarshalJSON(genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
}
db.SetSync(genesisDocKey, bytes)
db.SetSync(genesisDocKey, b)
}
func createAndStartPrivValidatorSocketClient(
@ -946,7 +1085,7 @@ func createAndStartPrivValidatorSocketClient(
listener = privval.NewTCPListener(ln, ed25519.GenPrivKey())
default:
return nil, fmt.Errorf(
"Wrong listen address: expected either 'tcp' or 'unix' protocols, got %s",
"wrong listen address: expected either 'tcp' or 'unix' protocols, got %s",
protocol,
)
}


+ 1
- 1
node/node_test.go View File

@ -224,7 +224,7 @@ func TestCreateProposalBlock(t *testing.T) {
// Make Mempool
memplMetrics := mempl.PrometheusMetrics("node_test")
mempool := mempl.NewMempool(
mempool := mempl.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,


+ 21
- 3
p2p/base_reactor.go View File

@ -5,23 +5,40 @@ import (
"github.com/tendermint/tendermint/p2p/conn"
)
// Reactor is responsible for handling incoming messages on one or more
// Channel. Switch calls GetChannels when reactor is added to it. When a new
// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
// when the peer is stopped. Receive is called when a message is received on a
// channel associated with this reactor.
//
// Peer#Send or Peer#TrySend should be used to send the message to a peer.
type Reactor interface {
cmn.Service // Start, Stop
// SetSwitch allows setting a switch.
SetSwitch(*Switch)
// GetChannels returns the list of channel descriptors.
// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
// that each ID is unique across all the reactors added to the switch.
GetChannels() []*conn.ChannelDescriptor
// AddPeer is called by the switch when a new peer is added.
// InitPeer is called by the switch before the peer is started. Use it to
// initialize data for the peer (e.g. peer state).
//
// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
// the peer. Avoid storing data associated with the peer in the reactor itself
// unless you are fine with that state never being cleaned up.
InitPeer(peer Peer) Peer
// AddPeer is called by the switch after the peer is added and successfully
// started. Use it to start goroutines communicating with the peer.
AddPeer(peer Peer)
// RemovePeer is called by the switch when the peer is stopped (due to error
// or other reason).
RemovePeer(peer Peer, reason interface{})
// Receive is called when msgBytes is received from peer.
// Receive is called by the switch when msgBytes is received from the peer.
//
// NOTE reactor can not keep msgBytes around after Receive completes without
// copying.
@ -51,3 +68,4 @@ func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil
func (*BaseReactor) AddPeer(peer Peer) {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
func (*BaseReactor) InitPeer(peer Peer) Peer { return peer }
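// Illustrative sketch (not part of this change): a minimal reactor following
// the lifecycle documented above. The reactor name, channel ID 0x99 and the
// echo behaviour are made up for the example; it would live in its own package.
package echoreactor

import (
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/conn"
)

type EchoReactor struct {
	p2p.BaseReactor
}

func NewEchoReactor() *EchoReactor {
	r := &EchoReactor{}
	r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
	return r
}

// GetChannels declares one channel; the ID must be unique across all reactors
// added to the switch, and the priority must be positive.
func (r *EchoReactor) GetChannels() []*conn.ChannelDescriptor {
	return []*conn.ChannelDescriptor{{ID: 0x99, Priority: 1}}
}

// InitPeer is called before the peer is started; set up per-peer state here.
func (r *EchoReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

// AddPeer is called after the peer is started; start per-peer goroutines here.
func (r *EchoReactor) AddPeer(peer p2p.Peer) {}

// RemovePeer is called when the peer is stopped; clean up per-peer state here.
func (r *EchoReactor) RemovePeer(peer p2p.Peer, reason interface{}) {}

// Receive echoes the message back; msgBytes is copied because it must not be
// retained after Receive returns.
func (r *EchoReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	src.TrySend(chID, append([]byte(nil), msgBytes...))
}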

+ 1
- 1
p2p/conn/connection.go View File

@ -707,7 +707,7 @@ type Channel struct {
func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
desc = desc.FillDefaults()
if desc.Priority <= 0 {
cmn.PanicSanity("Channel default priority must be a positive integer")
panic("Channel default priority must be a positive integer")
}
return &Channel{
conn: conn,


+ 6
- 0
p2p/conn/secret_connection.go View File

@ -8,6 +8,7 @@ import (
"encoding/binary"
"errors"
"io"
"math"
"net"
"sync"
"time"
@ -439,6 +440,11 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature []
// (little-endian in nonce[4:]).
func incrNonce(nonce *[aeadNonceSize]byte) {
counter := binary.LittleEndian.Uint64(nonce[4:])
if counter == math.MaxUint64 {
// Terminates the session and makes sure the nonce is not re-used.
// See https://github.com/tendermint/tendermint/issues/3531
panic("can't increase nonce without overflow")
}
counter++
binary.LittleEndian.PutUint64(nonce[4:], counter)
}

+ 1
- 3
p2p/netaddress.go View File

@ -14,8 +14,6 @@ import (
"time"
"errors"
cmn "github.com/tendermint/tendermint/libs/common"
)
// NetAddress defines information about a peer on the network
@ -48,7 +46,7 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
if flag.Lookup("test.v") == nil { // normal run
cmn.PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
} else { // in testing
netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0)
netAddr.ID = id


+ 92
- 0
p2p/peer_behaviour.go View File

@ -0,0 +1,92 @@
package p2p
import (
"errors"
"sync"
)
// PeerBehaviour enumerates the types of reportable peer behaviours.
type PeerBehaviour int
const (
PeerBehaviourBadMessage = iota
PeerBehaviourMessageOutOfOrder
PeerBehaviourVote
PeerBehaviourBlockPart
)
// PeerBehaviourReporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type PeerBehaviourReporter interface {
Report(peerID ID, behaviour PeerBehaviour) error
}
// SwitchPeerBehaviourReporter reports peer behaviour to an internal Switch.
type SwitchPeerBehaviourReporter struct {
sw *Switch
}
// NewSwitchPeerBehaviourReporter returns a new SwitchPeerBehaviourReporter instance which wraps the Switch.
func NewSwitchPeerBehaviourReporter(sw *Switch) *SwitchPeerBehaviourReporter {
return &SwitchPeerBehaviourReporter{
sw: sw,
}
}
// Report reports the behaviour of a peer to the Switch
func (spbr *SwitchPeerBehaviourReporter) Report(peerID ID, behaviour PeerBehaviour) error {
peer := spbr.sw.Peers().Get(peerID)
if peer == nil {
return errors.New("Peer not found")
}
switch behaviour {
case PeerBehaviourVote, PeerBehaviourBlockPart:
spbr.sw.MarkPeerAsGood(peer)
case PeerBehaviourBadMessage:
spbr.sw.StopPeerForError(peer, "Bad message")
case PeerBehaviourMessageOutOfOrder:
spbr.sw.StopPeerForError(peer, "Message out of order")
default:
return errors.New("Unknown behaviour")
}
return nil
}
// MockPeerBehaviourReporter serves as a mock concrete implementation of the
// PeerBehaviourReporter interface, used in reactor tests to ensure reactors
// report the correct behaviour in manufactured scenarios.
type MockPeerBehaviourReporter struct {
mtx sync.RWMutex
pb map[ID][]PeerBehaviour
}
// NewMockPeerBehaviourReporter returns a PeerBehaviourReporter which records all reported
// behaviours in memory.
func NewMockPeerBehaviourReporter() *MockPeerBehaviourReporter {
return &MockPeerBehaviourReporter{
pb: map[ID][]PeerBehaviour{},
}
}
// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockPeerBehaviourReporter) Report(peerID ID, behaviour PeerBehaviour) {
mpbr.mtx.Lock()
defer mpbr.mtx.Unlock()
mpbr.pb[peerID] = append(mpbr.pb[peerID], behaviour)
}
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockPeerBehaviourReporter) GetBehaviours(peerID ID) []PeerBehaviour {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {
result := make([]PeerBehaviour, len(items))
copy(result, items)
return result
} else {
return []PeerBehaviour{}
}
}
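// Illustrative sketch (not part of this change): a reactor reporting peer
// behaviour through the PeerBehaviourReporter interface above. The function
// and the notion of a "valid vote" are hypothetical; peer.ID() is assumed to
// be the p2p.Peer accessor for its ID.
func exampleReportVote(reporter PeerBehaviourReporter, peer Peer, valid bool) error {
	if !valid {
		return reporter.Report(peer.ID(), PeerBehaviourBadMessage)
	}
	return reporter.Report(peer.ID(), PeerBehaviourVote)
}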

+ 180
- 0
p2p/peer_behaviour_test.go View File

@ -0,0 +1,180 @@
package p2p_test
import (
"sync"
"testing"
"github.com/tendermint/tendermint/p2p"
)
// TestMockPeerBehaviourReporter tests the MockPeerBehaviourReporter's ability to store
// reported peer behaviour in memory, indexed by the peerID.
func TestMockPeerBehaviourReporter(t *testing.T) {
var peerID p2p.ID = "MockPeer"
pr := p2p.NewMockPeerBehaviourReporter()
behaviours := pr.GetBehaviours(peerID)
if len(behaviours) != 0 {
t.Error("Expected to have no behaviours reported")
}
pr.Report(peerID, p2p.PeerBehaviourBadMessage)
behaviours = pr.GetBehaviours(peerID)
if len(behaviours) != 1 {
t.Error("Expected the peer have one reported behaviour")
}
if behaviours[0] != p2p.PeerBehaviourBadMessage {
t.Error("Expected PeerBehaviourBadMessage to have been reported")
}
}
type scriptedBehaviours struct {
PeerID p2p.ID
Behaviours []p2p.PeerBehaviour
}
type scriptItem struct {
PeerID p2p.ID
Behaviour p2p.PeerBehaviour
}
// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequency, and false otherwise.
func equalBehaviours(a []p2p.PeerBehaviour, b []p2p.PeerBehaviour) bool {
aHistogram := map[p2p.PeerBehaviour]int{}
bHistogram := map[p2p.PeerBehaviour]int{}
for _, behaviour := range a {
aHistogram[behaviour] += 1
}
for _, behaviour := range b {
bHistogram[behaviour] += 1
}
if len(aHistogram) != len(bHistogram) {
return false
}
for _, behaviour := range a {
if aHistogram[behaviour] != bHistogram[behaviour] {
return false
}
}
for _, behaviour := range b {
if bHistogram[behaviour] != aHistogram[behaviour] {
return false
}
}
return true
}
// TestEqualPeerBehaviours tests that equalBehaviours can compare two slices
// of peer behaviours by the behaviours they contain and the frequencies at
// which those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
equals := []struct {
left []p2p.PeerBehaviour
right []p2p.PeerBehaviour
}{
// Empty sets
{[]p2p.PeerBehaviour{}, []p2p.PeerBehaviour{}},
// Single behaviours
{[]p2p.PeerBehaviour{p2p.PeerBehaviourVote}, []p2p.PeerBehaviour{p2p.PeerBehaviourVote}},
// Equal Frequencies
{[]p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourVote},
[]p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourVote}},
// Equal frequencies different orders
{[]p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourBlockPart},
[]p2p.PeerBehaviour{p2p.PeerBehaviourBlockPart, p2p.PeerBehaviourVote}},
}
for _, test := range equals {
if !equalBehaviours(test.left, test.right) {
t.Errorf("Expected %#v and %#v to be equal", test.left, test.right)
}
}
unequals := []struct {
left []p2p.PeerBehaviour
right []p2p.PeerBehaviour
}{
// Comparing empty sets to non empty sets
{[]p2p.PeerBehaviour{}, []p2p.PeerBehaviour{p2p.PeerBehaviourVote}},
// Different behaviours
{[]p2p.PeerBehaviour{p2p.PeerBehaviourVote}, []p2p.PeerBehaviour{p2p.PeerBehaviourBlockPart}},
// Same behaviour with different frequencies
{[]p2p.PeerBehaviour{p2p.PeerBehaviourVote},
[]p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourVote}},
}
for _, test := range unequals {
if equalBehaviours(test.left, test.right) {
t.Errorf("Expected %#v and %#v to be unequal", test.left, test.right)
}
}
}
// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which
// multiple goroutines are using the same MockPeerBehaviourReporter instance.
// This test reproduces the conditions under which MockPeerBehaviourReporter will
// be used within Reactor Receive method tests, to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
behaviourScript := []scriptedBehaviours{
{"1", []p2p.PeerBehaviour{p2p.PeerBehaviourVote}},
{"2", []p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourVote, p2p.PeerBehaviourVote, p2p.PeerBehaviourVote}},
{"3", []p2p.PeerBehaviour{p2p.PeerBehaviourBlockPart, p2p.PeerBehaviourVote, p2p.PeerBehaviourBlockPart, p2p.PeerBehaviourVote}},
{"4", []p2p.PeerBehaviour{p2p.PeerBehaviourVote, p2p.PeerBehaviourVote, p2p.PeerBehaviourVote, p2p.PeerBehaviourVote}},
{"5", []p2p.PeerBehaviour{p2p.PeerBehaviourBlockPart, p2p.PeerBehaviourVote, p2p.PeerBehaviourBlockPart, p2p.PeerBehaviourVote}},
}
var receiveWg sync.WaitGroup
pr := p2p.NewMockPeerBehaviourReporter()
scriptItems := make(chan scriptItem)
done := make(chan int)
numConsumers := 3
for i := 0; i < numConsumers; i++ {
receiveWg.Add(1)
go func() {
defer receiveWg.Done()
for {
select {
case pb := <-scriptItems:
pr.Report(pb.PeerID, pb.Behaviour)
case <-done:
return
}
}
}()
}
var sendingWg sync.WaitGroup
sendingWg.Add(1)
go func() {
defer sendingWg.Done()
for _, item := range behaviourScript {
for _, reason := range item.Behaviours {
scriptItems <- scriptItem{item.PeerID, reason}
}
}
}()
sendingWg.Wait()
for i := 0; i < numConsumers; i++ {
done <- 1
}
receiveWg.Wait()
for _, items := range behaviourScript {
reported := pr.GetBehaviours(items.PeerID)
if !equalBehaviours(reported, items.Behaviours) {
t.Errorf("Expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
items.PeerID, items.Behaviours, reported)
}
}
}

+ 2
- 2
p2p/pex/file.go View File

@ -53,14 +53,14 @@ func (a *addrBook) loadFromFile(filePath string) bool {
// Load addrBookJSON{}
r, err := os.Open(filePath)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Error opening file %s: %v", filePath, err))
panic(fmt.Sprintf("Error opening file %s: %v", filePath, err))
}
defer r.Close() // nolint: errcheck
aJSON := &addrBookJSON{}
dec := json.NewDecoder(r)
err = dec.Decode(aJSON)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Error reading file %s: %v", filePath, err))
panic(fmt.Sprintf("Error reading file %s: %v", filePath, err))
}
// Restore all the fields...


+ 80
- 49
p2p/pex/pex_reactor.go View File

@ -48,6 +48,24 @@ const (
biasToSelectNewPeers = 30 // 70 to select good peers
)
type errMaxAttemptsToDial struct {
}
func (e errMaxAttemptsToDial) Error() string {
return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial)
}
type errTooEarlyToDial struct {
backoffDuration time.Duration
lastDialed time.Time
}
func (e errTooEarlyToDial) Error() string {
return fmt.Sprintf(
"too early to dial (backoff duration: %d, last dialed: %v, time since: %v)",
e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
}
// PEXReactor handles PEX (peer exchange) and ensures that an
// adequate number of peers are connected to the switch.
//
@ -127,7 +145,7 @@ func (r *PEXReactor) OnStart() error {
if err != nil {
return err
} else if numOnline == 0 && r.book.Empty() {
return errors.New("Address book is empty, and could not connect to any seed nodes")
return errors.New("Address book is empty and couldn't resolve any seed nodes")
}
r.seedAddrs = seedAddrs
@ -186,6 +204,13 @@ func (r *PEXReactor) AddPeer(p Peer) {
}
}
// RemovePeer implements Reactor by resetting peer's requests info.
func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
id := string(p.ID())
r.requestsSent.Delete(id)
r.lastReceivedRequests.Delete(id)
}
func (r *PEXReactor) logErrAddrBook(err error) {
if err != nil {
switch err.(type) {
@ -198,13 +223,6 @@ func (r *PEXReactor) logErrAddrBook(err error) {
}
}
// RemovePeer implements Reactor.
func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
id := string(p.ID())
r.requestsSent.Delete(id)
r.lastReceivedRequests.Delete(id)
}
// Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
@ -285,7 +303,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
now := time.Now()
minInterval := r.minReceiveRequestInterval()
if now.Sub(lastReceived) < minInterval {
return fmt.Errorf("Peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
return fmt.Errorf("peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
src.ID(),
lastReceived,
now,
@ -296,14 +314,14 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
return nil
}
// RequestAddrs asks peer for more addresses if we do not already
// have a request out for this peer.
// RequestAddrs asks peer for more addresses if we do not already have a
// request out for this peer.
func (r *PEXReactor) RequestAddrs(p Peer) {
r.Logger.Debug("Request addrs", "from", p)
id := string(p.ID())
if r.requestsSent.Has(id) {
return
}
r.Logger.Debug("Request addrs", "from", p)
r.requestsSent.Set(id, struct{}{})
p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{}))
}
@ -314,7 +332,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
id := string(src.ID())
if !r.requestsSent.Has(id) {
return errors.New("Unsolicited pexAddrsMessage")
return errors.New("unsolicited pexAddrsMessage")
}
r.requestsSent.Delete(id)
@ -449,11 +467,21 @@ func (r *PEXReactor) ensurePeers() {
// Dial picked addresses
for _, addr := range toDial {
go r.dialPeer(addr)
go func(addr *p2p.NetAddress) {
err := r.dialPeer(addr)
if err != nil {
switch err.(type) {
case errMaxAttemptsToDial, errTooEarlyToDial:
r.Logger.Debug(err.Error(), "addr", addr)
default:
r.Logger.Error(err.Error(), "addr", addr)
}
}
}(addr)
}
// If we need more addresses, pick a random peer and ask for more.
if r.book.NeedMoreAddrs() {
// 1) Pick a random peer and ask for more.
peers := r.Switch.Peers().List()
peersCount := len(peers)
if peersCount > 0 {
@ -461,12 +489,14 @@ func (r *PEXReactor) ensurePeers() {
r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
r.RequestAddrs(peer)
}
}
// If we are not connected to nor dialing anybody, fallback to dialing a seed.
if out+in+dial+len(toDial) == 0 {
r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds")
r.dialSeeds()
// 2) Dial seeds if we are not dialing anyone.
// This is done in addition to asking a peer for addresses to work-around
// peers not participating in PEX.
if len(toDial) == 0 {
r.Logger.Info("No addresses to dial. Falling back to seeds")
r.dialSeeds()
}
}
}
@ -479,17 +509,16 @@ func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastD
return atd.number, atd.lastDialed
}
func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) error {
attempts, lastDialed := r.dialAttemptsInfo(addr)
if attempts > maxAttemptsToDial {
// Do not log the message if the addr gets re-added.
if attempts+1 == maxAttemptsToDial {
r.Logger.Info("Reached max attempts to dial", "addr", addr, "attempts", attempts)
r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
}
// TODO(melekes): have a blacklist in the addrbook with peers whom we've
// failed to connect to. Then we can clean up attemptsToDial, which acts as
// a blacklist currently.
// https://github.com/tendermint/tendermint/issues/3572
r.book.MarkBad(addr)
return
return errMaxAttemptsToDial{}
}
// exponential backoff if it's not our first attempt to dial given address
@ -498,33 +527,30 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second)
sinceLastDialed := time.Since(lastDialed)
if sinceLastDialed < backoffDuration {
r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed)
return
return errTooEarlyToDial{backoffDuration, lastDialed}
}
}
err := r.Switch.DialPeerWithAddress(addr, false)
err := r.Switch.DialPeerWithAddress(addr)
if err != nil {
if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok {
return
return err
}
r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts)
markAddrInBookBasedOnErr(addr, r.book, err)
if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok {
switch err.(type) {
case p2p.ErrSwitchAuthenticationFailure:
// NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr
r.attemptsToDial.Delete(addr.DialString())
} else {
// FIXME: if the addr is going to be removed from the addrbook (hard to
// tell at this point), we need to Delete it from attemptsToDial, not
// record another attempt.
// record attempt
default:
r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
}
return
return errors.Wrapf(err, "dialing failed (attempts: %d)", attempts+1)
}
// cleanup any history
r.attemptsToDial.Delete(addr.DialString())
return nil
}
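For intuition, a self-contained sketch (illustrative only; the loop bound and the main wrapper are not from the reactor) of how the exponential component of backoffDuration above grows with the attempt count, jitter omitted:

package main

import (
    "fmt"
    "time"
)

func main() {
    for attempts := 1; attempts <= 5; attempts++ {
        // mirrors (1 << uint(attempts)) * time.Second from dialPeer, without the jitter term
        backoff := (1 << uint(attempts)) * time.Second
        fmt.Printf("attempt %d: wait at least %v before redialing\n", attempts, backoff)
    }
}

This prints 2s, 4s, 8s, 16s and 32s, so errTooEarlyToDial is an expected, frequent condition, which is consistent with it being logged only at Debug level in ensurePeers and crawlPeers.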
// checkSeeds checks that addresses are well formed.
@ -547,7 +573,7 @@ func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, er
return 0, nil, errors.Wrap(e, "seed node configuration has error")
}
}
return
return numOnline, netAddrs, nil
}
// randomly dial seeds until we connect to one or exhaust them
@ -557,7 +583,7 @@ func (r *PEXReactor) dialSeeds() {
for _, i := range perm {
// dial a random seed
seedAddr := r.seedAddrs[i]
err := r.Switch.DialPeerWithAddress(seedAddr, false)
err := r.Switch.DialPeerWithAddress(seedAddr)
if err == nil {
return
}
@ -582,8 +608,13 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
// Seed/Crawler Mode causes this node to quickly disconnect
// from peers, except other seed nodes.
func (r *PEXReactor) crawlPeersRoutine() {
// Do an initial crawl
r.crawlPeers(r.book.GetSelection())
// If we have any seed nodes, consult them first
if len(r.seedAddrs) > 0 {
r.dialSeeds()
} else {
// Do an initial crawl
r.crawlPeers(r.book.GetSelection())
}
// Fire periodically
ticker := time.NewTicker(crawlPeerPeriod)
@ -633,14 +664,14 @@ func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) {
LastCrawled: now,
}
err := r.Switch.DialPeerWithAddress(addr, false)
err := r.dialPeer(addr)
if err != nil {
if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok {
continue
switch err.(type) {
case errMaxAttemptsToDial, errTooEarlyToDial:
r.Logger.Debug(err.Error(), "addr", addr)
default:
r.Logger.Error(err.Error(), "addr", addr)
}
r.Logger.Error("Dialing failed", "addr", addr, "err", err)
markAddrInBookBasedOnErr(addr, r.book, err)
continue
}


+ 73
- 5
p2p/pex/pex_reactor_test.go View File

@ -144,7 +144,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
sw.SetAddrBook(book)
peer := mock.NewPeer(nil)
p2p.AddPeerToSwitch(sw, peer)
p2p.AddPeerToSwitchPeerSet(sw, peer)
assert.True(t, sw.Peers().Has(peer.ID()))
id := string(peer.ID())
@ -174,7 +174,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
sw.SetAddrBook(book)
peer := mock.NewPeer(nil)
p2p.AddPeerToSwitch(sw, peer)
p2p.AddPeerToSwitchPeerSet(sw, peer)
assert.True(t, sw.Peers().Has(peer.ID()))
id := string(peer.ID())
@ -291,7 +291,8 @@ func TestPEXReactorSeedMode(t *testing.T) {
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexR, book := createReactor(&PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond})
pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}
pexR, book := createReactor(pexRConfig)
defer teardownReactor(book)
sw := createSwitchAndAddReactors(pexR)
@ -315,13 +316,80 @@ func TestPEXReactorSeedMode(t *testing.T) {
pexR.attemptDisconnects()
assert.Equal(t, 1, sw.Peers().Size())
time.Sleep(100 * time.Millisecond)
// sleep for SeedDisconnectWaitPeriod
time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond)
// 3. attemptDisconnects should disconnect after wait period
pexR.attemptDisconnects()
assert.Equal(t, 0, sw.Peers().Size())
}
func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
// directory to store address books
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond}
pexR, book := createReactor(pexRConfig)
defer teardownReactor(book)
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
err = sw.Start()
require.NoError(t, err)
defer sw.Stop()
assert.Zero(t, sw.Peers().Size())
peerSwitch := testCreateDefaultPeer(dir, 1)
require.NoError(t, peerSwitch.Start())
defer peerSwitch.Stop()
err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})
require.NoError(t, err)
// 1. Test crawlPeers dials the peer
pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()})
assert.Equal(t, 1, sw.Peers().Size())
assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID()))
// sleep for SeedDisconnectWaitPeriod
time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond)
// 2. attemptDisconnects should not disconnect because the peer is persistent
pexR.attemptDisconnects()
assert.Equal(t, 1, sw.Peers().Size())
}
func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) {
// directory to store address books
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
pexR, book := createReactor(&PEXReactorConfig{SeedMode: true})
defer teardownReactor(book)
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
err = sw.Start()
require.NoError(t, err)
defer sw.Stop()
peer := mock.NewPeer(nil)
addr := peer.SocketAddr()
err = book.AddAddress(addr, addr)
require.NoError(t, err)
assert.True(t, book.HasAddress(addr))
// imitate maxAttemptsToDial reached
pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()})
pexR.crawlPeers([]*p2p.NetAddress{addr})
assert.False(t, book.HasAddress(addr))
}
// connect a peer to a seed, wait a bit, then stop it.
// this should give it time to request addrs and for the seed
// to call FlushStop, and allows us to test calling Stop concurrently
@ -370,7 +438,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
reactor := switches[0].Reactors()["pex"].(*PEXReactor)
peerID := switches[1].NodeInfo().ID()
err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false)
err = switches[1].DialPeerWithAddress(switches[0].NetAddress())
assert.NoError(t, err)
// sleep up to a second while waiting for the peer to send us a message.


+ 104
- 36
p2p/switch.go View File

@ -77,6 +77,8 @@ type Switch struct {
nodeInfo NodeInfo // our node info
nodeKey *NodeKey // our node privkey
addrBook AddrBook
// addresses of peers with whom we'll maintain a constant connection
persistentPeersAddrs []*NetAddress
transport Transport
@ -104,16 +106,17 @@ func NewSwitch(
options ...SwitchOption,
) *Switch {
sw := &Switch{
config: cfg,
reactors: make(map[string]Reactor),
chDescs: make([]*conn.ChannelDescriptor, 0),
reactorsByCh: make(map[byte]Reactor),
peers: NewPeerSet(),
dialing: cmn.NewCMap(),
reconnecting: cmn.NewCMap(),
metrics: NopMetrics(),
transport: transport,
filterTimeout: defaultFilterTimeout,
config: cfg,
reactors: make(map[string]Reactor),
chDescs: make([]*conn.ChannelDescriptor, 0),
reactorsByCh: make(map[byte]Reactor),
peers: NewPeerSet(),
dialing: cmn.NewCMap(),
reconnecting: cmn.NewCMap(),
metrics: NopMetrics(),
transport: transport,
filterTimeout: defaultFilterTimeout,
persistentPeersAddrs: make([]*NetAddress, 0),
}
// Ensure we have a completely undeterministic PRNG.
@ -155,7 +158,7 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
for _, chDesc := range reactorChannels {
chID := chDesc.ID
if sw.reactorsByCh[chID] != nil {
cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
}
sw.chDescs = append(sw.chDescs, chDesc)
sw.reactorsByCh[chID] = reactor
@ -297,7 +300,19 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
sw.stopAndRemovePeer(peer, reason)
if peer.IsPersistent() {
go sw.reconnectToPeer(peer.SocketAddr())
var addr *NetAddress
if peer.IsOutbound() { // socket address for outbound peers
addr = peer.SocketAddr()
} else { // self-reported address for inbound peers
var err error
addr, err = peer.NodeInfo().NetAddress()
if err != nil {
sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong",
"peer", peer, "err", err)
return
}
}
go sw.reconnectToPeer(addr)
}
}
@ -309,14 +324,20 @@ func (sw *Switch) StopPeerGracefully(peer Peer) {
}
func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
if sw.peers.Remove(peer) {
sw.metrics.Peers.Add(float64(-1))
}
sw.transport.Cleanup(peer)
peer.Stop()
for _, reactor := range sw.reactors {
reactor.RemovePeer(peer, reason)
}
// Removing a peer should go last to avoid a situation where a peer
// reconnects to our node and the switch calls InitPeer before
// RemovePeer is finished.
// https://github.com/tendermint/tendermint/issues/3338
if sw.peers.Remove(peer) {
sw.metrics.Peers.Add(float64(-1))
}
}
// reconnectToPeer tries to reconnect to the addr, first repeatedly
@ -341,7 +362,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) {
return
}
err := sw.DialPeerWithAddress(addr, true)
err := sw.DialPeerWithAddress(addr)
if err == nil {
return // success
} else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok {
@ -365,7 +386,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) {
sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i))
sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second)
err := sw.DialPeerWithAddress(addr, true)
err := sw.DialPeerWithAddress(addr)
if err == nil {
return // success
} else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok {
@ -401,28 +422,41 @@ func isPrivateAddr(err error) bool {
return ok && te.PrivateAddr()
}
// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
// DialPeersAsync dials a list of peers asynchronously in random order.
// Used to dial peers from config on startup or from unsafe-RPC (trusted sources).
// TODO: remove addrBook arg since it's now set on the switch
func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error {
// It ignores ErrNetAddressLookup. However, if there are other errors, the
// first one encountered is returned.
// Nop if there are no peers.
func (sw *Switch) DialPeersAsync(peers []string) error {
netAddrs, errs := NewNetAddressStrings(peers)
// only log errors, dial correct addresses
// report all the errors
for _, err := range errs {
sw.Logger.Error("Error in peer's address", "err", err)
}
// return first non-ErrNetAddressLookup error
for _, err := range errs {
if _, ok := err.(ErrNetAddressLookup); ok {
continue
}
return err
}
sw.dialPeersAsync(netAddrs)
return nil
}
func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) {
ourAddr := sw.NetAddress()
// TODO: this code feels like it's in the wrong place.
// The integration tests depend on the addrBook being saved
// right away but maybe we can change that. Recall that
// the addrBook is only written to disk every 2min
if addrBook != nil {
if sw.addrBook != nil {
// add peers to `addrBook`
for _, netAddr := range netAddrs {
// do not add our address or ID
if !netAddr.Same(ourAddr) {
if err := addrBook.AddAddress(netAddr, ourAddr); err != nil {
if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil {
if isPrivateAddr(err) {
sw.Logger.Debug("Won't add peer's address to addrbook", "err", err)
} else {
@ -433,7 +467,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
}
// Persist some peers to disk right away.
// NOTE: integration tests depend on this
addrBook.Save()
sw.addrBook.Save()
}
// permute the list, dial them in random order.
@ -450,7 +484,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
sw.randomSleep(0)
err := sw.DialPeerWithAddress(addr, persistent)
err := sw.DialPeerWithAddress(addr)
if err != nil {
switch err.(type) {
case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress:
@ -461,16 +495,13 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
}
}(i)
}
return nil
}
// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects
// and authenticates successfully.
// If `persistent == true`, the switch will always try to reconnect to this
// peer if the connection ever fails.
// If we're currently dialing this address or it belongs to an existing peer,
// ErrCurrentlyDialingOrExistingAddress is returned.
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error {
func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error {
if sw.IsDialingOrExistingAddress(addr) {
return ErrCurrentlyDialingOrExistingAddress{addr.String()}
}
@ -478,7 +509,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error {
sw.dialing.Set(string(addr.ID), addr)
defer sw.dialing.Delete(string(addr.ID))
return sw.addOutboundPeerWithConfig(addr, sw.config, persistent)
return sw.addOutboundPeerWithConfig(addr, sw.config)
}
// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]
@ -495,6 +526,38 @@ func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool {
(!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP))
}
// AddPersistentPeers allows you to set persistent peers. It ignores
// ErrNetAddressLookup. However, if there are other errors, the first one
// encountered is returned.
func (sw *Switch) AddPersistentPeers(addrs []string) error {
sw.Logger.Info("Adding persistent peers", "addrs", addrs)
netAddrs, errs := NewNetAddressStrings(addrs)
// report all the errors
for _, err := range errs {
sw.Logger.Error("Error in peer's address", "err", err)
}
// return first non-ErrNetAddressLookup error
for _, err := range errs {
if _, ok := err.(ErrNetAddressLookup); ok {
continue
}
return err
}
sw.persistentPeersAddrs = netAddrs
return nil
}
func (sw *Switch) isPeerPersistentFn() func(*NetAddress) bool {
return func(na *NetAddress) bool {
for _, pa := range sw.persistentPeersAddrs {
if pa.Equals(na) {
return true
}
}
return false
}
}
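To tie the new API together, a hedged sketch (the helper name is invented; it assumes an already-constructed *Switch): persistent peers are registered on the switch before any dialing, so both the reconnect logic and the isPersistent callback handed to the transport can see them.

// dialConfiguredPeers is a hypothetical helper, not part of this diff.
func dialConfiguredPeers(sw *Switch, addrs []string) error {
    // Only the first error other than ErrNetAddressLookup is returned.
    if err := sw.AddPersistentPeers(addrs); err != nil {
        return err
    }
    // Nop if addrs is empty; dials happen asynchronously in random order.
    return sw.DialPeersAsync(addrs)
}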
func (sw *Switch) acceptRoutine() {
for {
p, err := sw.transport.Accept(peerConfig{
@ -502,6 +565,7 @@ func (sw *Switch) acceptRoutine() {
onPeerError: sw.StopPeerForError,
reactorsByCh: sw.reactorsByCh,
metrics: sw.metrics,
isPersistent: sw.isPeerPersistentFn(),
})
if err != nil {
switch err := err.(type) {
@ -581,13 +645,12 @@ func (sw *Switch) acceptRoutine() {
// dial the peer; make secret connection; authenticate against the dialed ID;
// add the peer.
// if dialing fails, start the reconnect loop. If handhsake fails, its over.
// If peer is started succesffuly, reconnectLoop will start when
// StopPeerForError is called
// if dialing fails, start the reconnect loop. If handshake fails, it's over.
// If peer is started successfully, reconnectLoop will start when
// StopPeerForError is called.
func (sw *Switch) addOutboundPeerWithConfig(
addr *NetAddress,
cfg *config.P2PConfig,
persistent bool,
) error {
sw.Logger.Info("Dialing peer", "address", addr)
@ -600,7 +663,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
p, err := sw.transport.Dial(*addr, peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
persistent: persistent,
isPersistent: sw.isPeerPersistentFn(),
reactorsByCh: sw.reactorsByCh,
metrics: sw.metrics,
})
@ -619,7 +682,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
// retry persistent peers after
// any dial error besides IsSelf()
if persistent {
if sw.isPeerPersistentFn()(addr) {
go sw.reconnectToPeer(addr)
}
@ -682,6 +745,11 @@ func (sw *Switch) addPeer(p Peer) error {
return nil
}
// Add some data to the peer, which is required by reactors.
for _, reactor := range sw.reactors {
p = reactor.InitPeer(p)
}
// Start the peer's send/recv routines.
// Must start it before adding it to the peer set
// to prevent Start and Stop from being called concurrently.


+ 138
- 40
p2p/switch_test.go View File

@ -12,6 +12,7 @@ import (
"regexp"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
@ -167,7 +168,7 @@ func TestSwitchFiltersOutItself(t *testing.T) {
rp.Start()
// addr should be rejected in addPeer based on the same ID
err := s1.DialPeerWithAddress(rp.Addr(), false)
err := s1.DialPeerWithAddress(rp.Addr())
if assert.Error(t, err) {
if err, ok := err.(ErrRejected); ok {
if !err.IsSelf() {
@ -212,6 +213,7 @@ func TestSwitchPeerFilter(t *testing.T) {
p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
isPersistent: sw.isPeerPersistentFn(),
reactorsByCh: sw.reactorsByCh,
})
if err != nil {
@ -256,6 +258,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
isPersistent: sw.isPeerPersistentFn(),
reactorsByCh: sw.reactorsByCh,
})
if err != nil {
@ -281,6 +284,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) {
p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
isPersistent: sw.isPeerPersistentFn(),
reactorsByCh: sw.reactorsByCh,
})
if err != nil {
@ -326,6 +330,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
isPersistent: sw.isPeerPersistentFn(),
reactorsByCh: sw.reactorsByCh,
})
require.Nil(err)
@ -390,49 +395,32 @@ func TestSwitchStopPeerForError(t *testing.T) {
assert.EqualValues(t, 0, peersMetricValue())
}
func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
assert, require := assert.New(t), require.New(t)
func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
err := sw.Start()
if err != nil {
t.Error(err)
}
require.NoError(t, err)
defer sw.Stop()
// simulate remote peer
// 1. simulate failure by closing connection
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
chDescs: sw.chDescs,
onPeerError: sw.StopPeerForError,
persistent: true,
reactorsByCh: sw.reactorsByCh,
})
require.Nil(err)
require.Nil(sw.addPeer(p))
err = sw.AddPersistentPeers([]string{rp.Addr().String()})
require.NoError(t, err)
require.NotNil(sw.Peers().Get(rp.ID()))
err = sw.DialPeerWithAddress(rp.Addr())
require.Nil(t, err)
require.NotNil(t, sw.Peers().Get(rp.ID()))
// simulate failure by closing connection
p := sw.Peers().List()[0]
p.(*peer).CloseConn()
// TODO: remove sleep, detect the disconnection, wait for reconnect
npeers := sw.Peers().Size()
for i := 0; i < 20; i++ {
time.Sleep(250 * time.Millisecond)
npeers = sw.Peers().Size()
if npeers > 0 {
break
}
}
assert.NotZero(npeers)
assert.False(p.IsRunning())
waitUntilSwitchHasAtLeastNPeers(sw, 1)
assert.False(t, p.IsRunning()) // old peer instance
assert.Equal(t, 1, sw.Peers().Size()) // new peer instance
// simulate another remote peer
// 2. simulate first time dial failure
rp = &remotePeer{
PrivKey: ed25519.GenPrivKey(),
Config: cfg,
@ -443,23 +431,68 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
rp.Start()
defer rp.Stop()
// simulate first time dial failure
conf := config.DefaultP2PConfig()
conf.TestDialFail = true
err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
require.NotNil(err)
conf.TestDialFail = true // will trigger a reconnect
err = sw.addOutboundPeerWithConfig(rp.Addr(), conf)
require.NotNil(t, err)
// DialPeerWithAddress - sw.peerConfig resets the dialer
waitUntilSwitchHasAtLeastNPeers(sw, 2)
assert.Equal(t, 2, sw.Peers().Size())
}
// TODO: same as above
func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) {
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
err := sw.Start()
require.NoError(t, err)
defer sw.Stop()
// 1. simulate failure by closing the connection
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
err = sw.AddPersistentPeers([]string{rp.Addr().String()})
require.NoError(t, err)
conn, err := rp.Dial(sw.NetAddress())
require.NoError(t, err)
time.Sleep(50 * time.Millisecond)
require.NotNil(t, sw.Peers().Get(rp.ID()))
conn.Close()
waitUntilSwitchHasAtLeastNPeers(sw, 1)
assert.Equal(t, 1, sw.Peers().Size())
}
func TestSwitchDialPeersAsync(t *testing.T) {
if testing.Short() {
return
}
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
err := sw.Start()
require.NoError(t, err)
defer sw.Stop()
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
err = sw.DialPeersAsync([]string{rp.Addr().String()})
require.NoError(t, err)
time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond)
require.NotNil(t, sw.Peers().Get(rp.ID()))
}
func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) {
for i := 0; i < 20; i++ {
time.Sleep(250 * time.Millisecond)
npeers = sw.Peers().Size()
if npeers > 1 {
has := sw.Peers().Size()
if has >= n {
break
}
}
assert.EqualValues(2, npeers)
}
func TestSwitchFullConnectivity(t *testing.T) {
@ -571,6 +604,71 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
})
}
// mockReactor checks that InitPeer is never called before RemovePeer finishes.
// If it is, InitCalledBeforeRemoveFinished will return true.
type mockReactor struct {
*BaseReactor
// atomic
removePeerInProgress uint32
initCalledBeforeRemoveFinished uint32
}
func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) {
atomic.StoreUint32(&r.removePeerInProgress, 1)
defer atomic.StoreUint32(&r.removePeerInProgress, 0)
time.Sleep(100 * time.Millisecond)
}
func (r *mockReactor) InitPeer(peer Peer) Peer {
if atomic.LoadUint32(&r.removePeerInProgress) == 1 {
atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1)
}
return peer
}
func (r *mockReactor) InitCalledBeforeRemoveFinished() bool {
return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1
}
// see stopAndRemovePeer
func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
// make reactor
reactor := &mockReactor{}
reactor.BaseReactor = NewBaseReactor("mockReactor", reactor)
// make switch
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch {
sw.AddReactor("mock", reactor)
return sw
})
err := sw.Start()
require.NoError(t, err)
defer sw.Stop()
// add peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
_, err = rp.Dial(sw.NetAddress())
require.NoError(t, err)
// wait till the switch adds rp to the peer set
time.Sleep(50 * time.Millisecond)
// stop peer asynchronously
go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test")
// simulate peer reconnecting to us
_, err = rp.Dial(sw.NetAddress())
require.NoError(t, err)
// wait till the switch adds rp to the peer set
time.Sleep(50 * time.Millisecond)
// make sure reactor.RemovePeer is finished before InitPeer is called
assert.False(t, reactor.InitCalledBeforeRemoveFinished())
}
func BenchmarkSwitchBroadcast(b *testing.B) {
s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
// Make bar reactors of bar channels each


+ 1
- 1
p2p/test_util.go View File

@ -27,7 +27,7 @@ func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, ni
func (ni mockNodeInfo) Validate() error { return nil }
func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil }
func AddPeerToSwitch(sw *Switch, peer Peer) {
func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) {
sw.peers.Add(peer)
}


+ 22
- 6
p2p/transport.go View File

@ -37,11 +37,15 @@ type accept struct {
// events.
// TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour.
type peerConfig struct {
chDescs []*conn.ChannelDescriptor
onPeerError func(Peer, interface{})
outbound, persistent bool
reactorsByCh map[byte]Reactor
metrics *Metrics
chDescs []*conn.ChannelDescriptor
onPeerError func(Peer, interface{})
outbound bool
// isPersistent allows you to set a function which, given the socket address
// (for outbound peers) or the self-reported address (for inbound peers), tells
// whether the peer is persistent or not.
isPersistent func(*NetAddress) bool
reactorsByCh map[byte]Reactor
metrics *Metrics
}
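As an illustration (persistentAddrs is a hypothetical slice; every other field is left at its zero value), this is roughly the predicate the switch supplies through isPeerPersistentFn above:

persistentAddrs := []*NetAddress{ /* filled from configuration */ }
cfg := peerConfig{
    isPersistent: func(na *NetAddress) bool {
        for _, pa := range persistentAddrs {
            if pa.Equals(na) {
                return true
            }
        }
        return false
    },
}
_ = cfg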
// Transport emits and connects to Peers. The implementation of Peer is left to
@ -446,9 +450,21 @@ func (mt *MultiplexTransport) wrapPeer(
socketAddr *NetAddress,
) Peer {
persistent := false
if cfg.isPersistent != nil {
if cfg.outbound {
persistent = cfg.isPersistent(socketAddr)
} else {
selfReportedAddr, err := ni.NetAddress()
if err == nil {
persistent = cfg.isPersistent(selfReportedAddr)
}
}
}
peerConn := newPeerConn(
cfg.outbound,
cfg.persistent,
persistent,
c,
socketAddr,
)


+ 1
- 1
p2p/trust/store.go View File

@ -156,7 +156,7 @@ func (tms *TrustMetricStore) loadFromDB() bool {
peers := make(map[string]MetricHistoryJSON)
err := json.Unmarshal(bytes, &peers)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err))
panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err))
}
// If history data exists in the file,


+ 14
- 3
privval/socket_listeners_test.go View File

@ -97,7 +97,15 @@ func TestListenerTimeoutAccept(t *testing.T) {
}
func TestListenerTimeoutReadWrite(t *testing.T) {
for _, tc := range listenerTestCases(t, time.Second, time.Millisecond) {
const (
// This needs to be long enough s.t. the Accept will definitely succeed:
timeoutAccept = time.Second
// This can be really short but in the TCP case, the accept can
// also trigger a timeoutReadWrite. Hence, we need to give it some time.
// Note: this controls how long this test actually runs.
timeoutReadWrite = 10 * time.Millisecond
)
for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) {
go func(dialer SocketDialer) {
_, err := dialer()
if err != nil {
@ -110,8 +118,7 @@ func TestListenerTimeoutReadWrite(t *testing.T) {
t.Fatal(err)
}
time.Sleep(2 * time.Millisecond)
// this will timeout because we don't write anything:
msg := make([]byte, 200)
_, err = c.Read(msg)
opErr, ok := err.(*net.OpError)
@ -122,5 +129,9 @@ func TestListenerTimeoutReadWrite(t *testing.T) {
if have, want := opErr.Op, "read"; have != want {
t.Errorf("for %s listener, have %v, want %v", tc.description, have, want)
}
if !opErr.Timeout() {
t.Errorf("for %s listener, got unexpected error: have %v, want Timeout error", tc.description, opErr)
}
}
}

+ 126
- 0
rpc/client/examples_test.go View File

@ -0,0 +1,126 @@
package client_test
import (
"bytes"
"fmt"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
func ExampleHTTP_simple() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
defer rpctest.StopTendermint(node)
// Create our RPC client
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
c := client.NewHTTP(rpcAddr, "/websocket")
// Create a transaction
k := []byte("name")
v := []byte("satoshi")
tx := append(k, append([]byte("="), v...)...)
// Broadcast the transaction and wait for it to commit (in production you
// would usually use c.BroadcastTxSync instead)
bres, err := c.BroadcastTxCommit(tx)
if err != nil {
panic(err)
}
if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() {
panic("BroadcastTxCommit transaction failed")
}
// Now try to fetch the value for the key
qres, err := c.ABCIQuery("/key", k)
if err != nil {
panic(err)
}
if qres.Response.IsErr() {
panic("ABCIQuery failed")
}
if !bytes.Equal(qres.Response.Key, k) {
panic("returned key does not match queried key")
}
if !bytes.Equal(qres.Response.Value, v) {
panic("returned value does not match sent value")
}
fmt.Println("Sent tx :", string(tx))
fmt.Println("Queried for :", string(qres.Response.Key))
fmt.Println("Got value :", string(qres.Response.Value))
// Output:
// Sent tx : name=satoshi
// Queried for : name
// Got value : satoshi
}
func ExampleHTTP_batching() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
defer rpctest.StopTendermint(node)
// Create our RPC client
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
c := client.NewHTTP(rpcAddr, "/websocket")
// Create our two transactions
k1 := []byte("firstName")
v1 := []byte("satoshi")
tx1 := append(k1, append([]byte("="), v1...)...)
k2 := []byte("lastName")
v2 := []byte("nakamoto")
tx2 := append(k2, append([]byte("="), v2...)...)
txs := [][]byte{tx1, tx2}
// Create a new batch
batch := c.NewBatch()
// Queue up our transactions
for _, tx := range txs {
if _, err := batch.BroadcastTxCommit(tx); err != nil {
panic(err)
}
}
// Send the batch of 2 transactions
if _, err := batch.Send(); err != nil {
panic(err)
}
// Now let's query for the original results as a batch
keys := [][]byte{k1, k2}
for _, key := range keys {
if _, err := batch.ABCIQuery("/key", key); err != nil {
panic(err)
}
}
// Send the 2 queries and keep the results
results, err := batch.Send()
if err != nil {
panic(err)
}
// Each result in the returned list is the deserialized result of each
// respective ABCIQuery response
for _, result := range results {
qr, ok := result.(*ctypes.ResultABCIQuery)
if !ok {
panic("invalid result type from ABCIQuery request")
}
fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value))
}
// Output:
// firstName = satoshi
// lastName = nakamoto
}

+ 1
- 1
rpc/client/helpers.go View File

@ -15,7 +15,7 @@ type Waiter func(delta int64) (abort error)
// but you can plug in another one
func DefaultWaitStrategy(delta int64) (abort error) {
if delta > 10 {
return errors.Errorf("Waiting for %d blocks... aborting", delta)
return errors.Errorf("waiting for %d blocks... aborting", delta)
} else if delta > 0 {
// estimate of wait time....
// wait half a second for the next block (in progress)


+ 135
- 52
rpc/client/httpclient.go View File

@ -18,27 +18,72 @@ import (
)
/*
HTTP is a Client implementation that communicates with a tendermint node over
json rpc and websockets.
HTTP is a Client implementation that communicates with a Tendermint node over
JSON RPC and WebSockets.
This is the main implementation you probably want to use in production code.
There are other implementations when calling the tendermint node in-process
There are other implementations when calling the Tendermint node in-process
(Local), or when you want to mock out the server for test code (mock).
You can subscribe for any event published by Tendermint using Subscribe method.
Note delivery is best-effort. If you don't read events fast enough or network
is slow, Tendermint might cancel the subscription. The client will attempt to
Note delivery is best-effort. If you don't read events fast enough or network is
slow, Tendermint might cancel the subscription. The client will attempt to
resubscribe (you don't need to do anything). It will keep trying every second
indefinitely until successful.
Request batching is available for JSON RPC requests over HTTP, which conforms to
the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See
the example for more details.
*/
type HTTP struct {
remote string
rpc *rpcclient.JSONRPCClient
*baseRPCClient
*WSEvents
}
// NewHTTP takes a remote endpoint in the form tcp://<host>:<port>
// and the websocket path (which always seems to be "/websocket")
// BatchHTTP provides the same interface as `HTTP`, but allows for batching of
// requests (as per https://www.jsonrpc.org/specification#batch). Do not
// instantiate directly - rather use the HTTP.NewBatch() method to create an
// instance of this struct.
//
// Batching of HTTP requests is thread-safe in the sense that multiple
// goroutines can each create their own batches and send them using the same
// HTTP client. Multiple goroutines could also enqueue transactions in a single
// batch, but the ordering of transactions within the batch cannot be
// guaranteed in that case.
type BatchHTTP struct {
rpcBatch *rpcclient.JSONRPCRequestBatch
*baseRPCClient
}
// rpcClient is an internal interface to which our RPC clients (batch and
// non-batch) must conform. Acts as an additional code-level sanity check to
// make sure the implementations stay coherent.
type rpcClient interface {
ABCIClient
HistoryClient
NetworkClient
SignClient
StatusClient
}
// baseRPCClient implements the basic RPC method logic without the actual
// underlying RPC call functionality, which is provided by `caller`.
type baseRPCClient struct {
caller rpcclient.JSONRPCCaller
}
var _ rpcClient = (*HTTP)(nil)
var _ rpcClient = (*BatchHTTP)(nil)
var _ rpcClient = (*baseRPCClient)(nil)
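The three blank-identifier assignments above are the usual Go compile-time conformance check: the build fails if any listed type stops satisfying rpcClient, with effectively no runtime cost. A tiny standalone illustration of the idiom (all names invented):

package main

type greeter interface{ Greet() string }

type english struct{}

func (english) Greet() string { return "hello" }

// Compile-time check only: breaks the build if english stops implementing greeter.
var _ greeter = (*english)(nil)

func main() {}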
//-----------------------------------------------------------------------------
// HTTP
// NewHTTP takes a remote endpoint in the form <protocol>://<host>:<port> and
// the websocket path (which always seems to be "/websocket")
func NewHTTP(remote, wsEndpoint string) *HTTP {
rc := rpcclient.NewJSONRPCClient(remote)
cdc := rc.Codec()
@ -46,39 +91,76 @@ func NewHTTP(remote, wsEndpoint string) *HTTP {
rc.SetCodec(cdc)
return &HTTP{
rpc: rc,
remote: remote,
WSEvents: newWSEvents(cdc, remote, wsEndpoint),
rpc: rc,
remote: remote,
baseRPCClient: &baseRPCClient{caller: rc},
WSEvents: newWSEvents(cdc, remote, wsEndpoint),
}
}
var _ Client = (*HTTP)(nil)
func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
// NewBatch creates a new batch client for this HTTP client.
func (c *HTTP) NewBatch() *BatchHTTP {
rpcBatch := c.rpc.NewRequestBatch()
return &BatchHTTP{
rpcBatch: rpcBatch,
baseRPCClient: &baseRPCClient{
caller: rpcBatch,
},
}
}
//-----------------------------------------------------------------------------
// BatchHTTP
// Send is a convenience function for an HTTP batch that will trigger the
// compilation of the batched requests and send them off using the client as a
// single request. On success, this returns a list of the deserialized results
// from each request in the sent batch.
func (b *BatchHTTP) Send() ([]interface{}, error) {
return b.rpcBatch.Send()
}
// Clear will empty out this batch of requests and return the number of requests
// that were cleared out.
func (b *BatchHTTP) Clear() int {
return b.rpcBatch.Clear()
}
// Count returns the number of enqueued requests waiting to be sent.
func (b *BatchHTTP) Count() int {
return b.rpcBatch.Count()
}
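A compact sketch of the batching flow (the function is hypothetical and assumes c was built with NewHTTP and fmt is imported; runnable versions live in rpc/client/examples_test.go and rpc/client/rpc_test.go elsewhere in this diff):

func exampleBatchFlow(c *HTTP) {
    batch := c.NewBatch()
    _, _ = batch.Status()   // queued, not yet sent
    _, _ = batch.ABCIInfo() // queued
    results, err := batch.Send() // both requests go out in a single JSON RPC batch
    if err != nil {
        panic(err)
    }
    fmt.Println(len(results), batch.Count()) // 2 0
}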
//-----------------------------------------------------------------------------
// baseRPCClient
func (c *baseRPCClient) Status() (*ctypes.ResultStatus, error) {
result := new(ctypes.ResultStatus)
_, err := c.rpc.Call("status", map[string]interface{}{}, result)
_, err := c.caller.Call("status", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "Status")
}
return result, nil
}
func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
result := new(ctypes.ResultABCIInfo)
_, err := c.rpc.Call("abci_info", map[string]interface{}{}, result)
_, err := c.caller.Call("abci_info", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "ABCIInfo")
}
return result, nil
}
func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
func (c *baseRPCClient) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions)
}
func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (c *baseRPCClient) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
result := new(ctypes.ResultABCIQuery)
_, err := c.rpc.Call("abci_query",
_, err := c.caller.Call("abci_query",
map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove},
result)
if err != nil {
@ -87,89 +169,89 @@ func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQue
return result, nil
}
func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
func (c *baseRPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
result := new(ctypes.ResultBroadcastTxCommit)
_, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
_, err := c.caller.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
if err != nil {
return nil, errors.Wrap(err, "broadcast_tx_commit")
}
return result, nil
}
func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *baseRPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_async", tx)
}
func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *baseRPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_sync", tx)
}
func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *baseRPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
result := new(ctypes.ResultBroadcastTx)
_, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, result)
_, err := c.caller.Call(route, map[string]interface{}{"tx": tx}, result)
if err != nil {
return nil, errors.Wrap(err, route)
}
return result, nil
}
func (c *HTTP) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
func (c *baseRPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
result := new(ctypes.ResultUnconfirmedTxs)
_, err := c.rpc.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result)
_, err := c.caller.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result)
if err != nil {
return nil, errors.Wrap(err, "unconfirmed_txs")
}
return result, nil
}
func (c *HTTP) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
func (c *baseRPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
result := new(ctypes.ResultUnconfirmedTxs)
_, err := c.rpc.Call("num_unconfirmed_txs", map[string]interface{}{}, result)
_, err := c.caller.Call("num_unconfirmed_txs", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "num_unconfirmed_txs")
}
return result, nil
}
func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) {
func (c *baseRPCClient) NetInfo() (*ctypes.ResultNetInfo, error) {
result := new(ctypes.ResultNetInfo)
_, err := c.rpc.Call("net_info", map[string]interface{}{}, result)
_, err := c.caller.Call("net_info", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "NetInfo")
}
return result, nil
}
func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
func (c *baseRPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
result := new(ctypes.ResultDumpConsensusState)
_, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, result)
_, err := c.caller.Call("dump_consensus_state", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "DumpConsensusState")
}
return result, nil
}
func (c *HTTP) ConsensusState() (*ctypes.ResultConsensusState, error) {
func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) {
result := new(ctypes.ResultConsensusState)
_, err := c.rpc.Call("consensus_state", map[string]interface{}{}, result)
_, err := c.caller.Call("consensus_state", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "ConsensusState")
}
return result, nil
}
func (c *HTTP) Health() (*ctypes.ResultHealth, error) {
func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) {
result := new(ctypes.ResultHealth)
_, err := c.rpc.Call("health", map[string]interface{}{}, result)
_, err := c.caller.Call("health", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "Health")
}
return result, nil
}
func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
func (c *baseRPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
result := new(ctypes.ResultBlockchainInfo)
_, err := c.rpc.Call("blockchain",
_, err := c.caller.Call("blockchain",
map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight},
result)
if err != nil {
@ -178,56 +260,56 @@ func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockch
return result, nil
}
func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
func (c *baseRPCClient) Genesis() (*ctypes.ResultGenesis, error) {
result := new(ctypes.ResultGenesis)
_, err := c.rpc.Call("genesis", map[string]interface{}{}, result)
_, err := c.caller.Call("genesis", map[string]interface{}{}, result)
if err != nil {
return nil, errors.Wrap(err, "Genesis")
}
return result, nil
}
func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) {
func (c *baseRPCClient) Block(height *int64) (*ctypes.ResultBlock, error) {
result := new(ctypes.ResultBlock)
_, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result)
_, err := c.caller.Call("block", map[string]interface{}{"height": height}, result)
if err != nil {
return nil, errors.Wrap(err, "Block")
}
return result, nil
}
func (c *HTTP) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) {
func (c *baseRPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) {
result := new(ctypes.ResultBlockResults)
_, err := c.rpc.Call("block_results", map[string]interface{}{"height": height}, result)
_, err := c.caller.Call("block_results", map[string]interface{}{"height": height}, result)
if err != nil {
return nil, errors.Wrap(err, "Block Result")
}
return result, nil
}
func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) {
func (c *baseRPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) {
result := new(ctypes.ResultCommit)
_, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result)
_, err := c.caller.Call("commit", map[string]interface{}{"height": height}, result)
if err != nil {
return nil, errors.Wrap(err, "Commit")
}
return result, nil
}
func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
func (c *baseRPCClient) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
result := new(ctypes.ResultTx)
params := map[string]interface{}{
"hash": hash,
"prove": prove,
}
_, err := c.rpc.Call("tx", params, result)
_, err := c.caller.Call("tx", params, result)
if err != nil {
return nil, errors.Wrap(err, "Tx")
}
return result, nil
}
func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
result := new(ctypes.ResultTxSearch)
params := map[string]interface{}{
"query": query,
@ -235,23 +317,24 @@ func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.Re
"page": page,
"per_page": perPage,
}
_, err := c.rpc.Call("tx_search", params, result)
_, err := c.caller.Call("tx_search", params, result)
if err != nil {
return nil, errors.Wrap(err, "TxSearch")
}
return result, nil
}
func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) {
func (c *baseRPCClient) Validators(height *int64) (*ctypes.ResultValidators, error) {
result := new(ctypes.ResultValidators)
_, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result)
_, err := c.caller.Call("validators", map[string]interface{}{"height": height}, result)
if err != nil {
return nil, errors.Wrap(err, "Validators")
}
return result, nil
}
/** websocket event stuff here... **/
//-----------------------------------------------------------------------------
// WSEvents
type WSEvents struct {
cmn.BaseService


+ 105
- 4
rpc/client/rpc_test.go View File

@ -4,6 +4,7 @@ import (
"fmt"
"net/http"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
@ -11,7 +12,9 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
@ -249,7 +252,8 @@ func TestAppCalls(t *testing.T) {
func TestBroadcastTxSync(t *testing.T) {
require := require.New(t)
mempool := node.MempoolReactor().Mempool
// TODO (melekes): use the mempool which is set on the RPC rather than getting it from the node
mempool := node.Mempool()
initMempoolSize := mempool.Size()
for i, c := range GetClients() {
@ -269,7 +273,7 @@ func TestBroadcastTxSync(t *testing.T) {
func TestBroadcastTxCommit(t *testing.T) {
require := require.New(t)
mempool := node.MempoolReactor().Mempool
mempool := node.Mempool()
for i, c := range GetClients() {
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
@ -284,7 +288,7 @@ func TestBroadcastTxCommit(t *testing.T) {
func TestUnconfirmedTxs(t *testing.T) {
_, _, tx := MakeTxKV()
mempool := node.MempoolReactor().Mempool
mempool := node.Mempool()
_ = mempool.CheckTx(tx, nil)
for i, c := range GetClients() {
@ -305,7 +309,7 @@ func TestUnconfirmedTxs(t *testing.T) {
func TestNumUnconfirmedTxs(t *testing.T) {
_, _, tx := MakeTxKV()
mempool := node.MempoolReactor().Mempool
mempool := node.Mempool()
_ = mempool.CheckTx(tx, nil)
mempoolSize := mempool.Size()
@ -441,3 +445,100 @@ func TestTxSearch(t *testing.T) {
require.Len(t, result.Txs, 0)
}
}
func TestBatchedJSONRPCCalls(t *testing.T) {
c := getHTTPClient()
testBatchedJSONRPCCalls(t, c)
}
func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) {
k1, v1, tx1 := MakeTxKV()
k2, v2, tx2 := MakeTxKV()
batch := c.NewBatch()
r1, err := batch.BroadcastTxCommit(tx1)
require.NoError(t, err)
r2, err := batch.BroadcastTxCommit(tx2)
require.NoError(t, err)
require.Equal(t, 2, batch.Count())
bresults, err := batch.Send()
require.NoError(t, err)
require.Len(t, bresults, 2)
require.Equal(t, 0, batch.Count())
bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit)
require.True(t, ok)
require.Equal(t, *bresult1, *r1)
bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit)
require.True(t, ok)
require.Equal(t, *bresult2, *r2)
apph := cmn.MaxInt64(bresult1.Height, bresult2.Height) + 1
client.WaitForHeight(c, apph, nil)
q1, err := batch.ABCIQuery("/key", k1)
require.NoError(t, err)
q2, err := batch.ABCIQuery("/key", k2)
require.NoError(t, err)
require.Equal(t, 2, batch.Count())
qresults, err := batch.Send()
require.NoError(t, err)
require.Len(t, qresults, 2)
require.Equal(t, 0, batch.Count())
qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery)
require.True(t, ok)
require.Equal(t, *qresult1, *q1)
qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery)
require.True(t, ok)
require.Equal(t, *qresult2, *q2)
require.Equal(t, qresult1.Response.Key, k1)
require.Equal(t, qresult2.Response.Key, k2)
require.Equal(t, qresult1.Response.Value, v1)
require.Equal(t, qresult2.Response.Value, v2)
}
func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
c := getHTTPClient()
_, _, tx1 := MakeTxKV()
_, _, tx2 := MakeTxKV()
batch := c.NewBatch()
_, err := batch.BroadcastTxCommit(tx1)
require.NoError(t, err)
_, err = batch.BroadcastTxCommit(tx2)
require.NoError(t, err)
// we should have 2 requests waiting
require.Equal(t, 2, batch.Count())
// we want to make sure we cleared 2 pending requests
require.Equal(t, 2, batch.Clear())
// now there should be no batched requests
require.Equal(t, 0, batch.Count())
}
func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) {
c := getHTTPClient()
batch := c.NewBatch()
_, err := batch.Send()
require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
}
func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) {
c := getHTTPClient()
batch := c.NewBatch()
require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
}
func TestConcurrentJSONRPCBatching(t *testing.T) {
var wg sync.WaitGroup
c := getHTTPClient()
for i := 0; i < 50; i++ {
wg.Add(1)
go func() {
defer wg.Done()
testBatchedJSONRPCCalls(t, c)
}()
}
wg.Wait()
}

Some files were not shown because too many files changed in this diff
