
Merge pull request #675 from tendermint/release-v0.11.0

Release v0.11.0
Ethan Buchman committed 7 years ago (via GitHub)
commit 7682ad9a60 (tag: v0.11.0)
130 changed files with 2343 additions and 1319 deletions
  1. CHANGELOG.md (+51, -29)
  2. CONTRIBUTING.md (+1, -1)
  3. README.md (+54, -26)
  4. blockchain/pool.go (+1, -1)
  5. blockchain/reactor.go (+16, -11)
  6. cmd/tendermint/commands/gen_validator.go (+4, -6)
  7. cmd/tendermint/commands/init.go (+5, -9)
  8. cmd/tendermint/commands/probe_upnp.go (+2, -5)
  9. cmd/tendermint/commands/replay.go (+5, -7)
  10. cmd/tendermint/commands/reset_priv_validator.go (+15, -18)
  11. cmd/tendermint/commands/root.go (+2, -1)
  12. cmd/tendermint/commands/run_node.go (+27, -49)
  13. cmd/tendermint/commands/show_validator.go (+3, -6)
  14. cmd/tendermint/commands/testnet.go (+14, -15)
  15. cmd/tendermint/commands/version.go (+2, -5)
  16. cmd/tendermint/main.go (+30, -2)
  17. config/config.go (+0, -7)
  18. config/toml.go (+1, -1)
  19. consensus/byzantine_test.go (+27, -34)
  20. consensus/common_test.go (+22, -23)
  21. consensus/height_vote_set_test.go (+2, -2)
  22. consensus/mempool_test.go (+1, -1)
  23. consensus/reactor.go (+30, -23)
  24. consensus/reactor_test.go (+5, -5)
  25. consensus/replay.go (+7, -25)
  26. consensus/replay_file.go (+5, -2)
  27. consensus/replay_test.go (+18, -18)
  28. consensus/state.go (+8, -14)
  29. consensus/state_test.go (+9, -11)
  30. docs/abci-cli.rst (+2, -2)
  31. docs/app-architecture.rst (+1, -1)
  32. docs/architecture/ABCI.md (+0, -16)
  33. docs/architecture/README.md (+1, -12)
  34. docs/architecture/adr-001-logging.md (+0, -0)
  35. docs/architecture/adr-002-event-subscription.md (+1, -1)
  36. docs/architecture/adr-003-abci-app-rpc.md (+1, -1)
  37. docs/architecture/adr-004-historical-validators.md (+38, -0)
  38. docs/architecture/adr-005-consensus-params.md (+85, -0)
  39. docs/architecture/adr-template.md (+16, -0)
  40. docs/architecture/merkle-frey.md (+0, -240)
  41. docs/architecture/merkle.md (+0, -17)
  42. docs/assets/abci.png (+0, -0)
  43. docs/assets/consensus_logic.png (+0, -0)
  44. docs/assets/tm-transaction-flow.png (+0, -0)
  45. docs/conf.py (+29, -1)
  46. docs/deploy-testnets.rst (+47, -4)
  47. docs/ecosystem.rst (+94, -0)
  48. docs/index.rst (+22, -1)
  49. docs/install.rst (+20, -12)
  50. docs/introduction.rst (+6, -6)
  51. docs/specification.rst (+11, -11)
  52. docs/specification/block-structure.rst (+0, -0)
  53. docs/specification/byzantine-consensus-algorithm.rst (+0, -0)
  54. docs/specification/configuration.rst (+0, -0)
  55. docs/specification/fast-sync.rst (+0, -0)
  56. docs/specification/genesis.rst (+5, -5)
  57. docs/specification/light-client-protocol.rst (+1, -1)
  58. docs/specification/merkle.rst (+0, -0)
  59. docs/specification/rpc.rst (+0, -0)
  60. docs/specification/secure-p2p.rst (+0, -0)
  61. docs/specification/validators.rst (+0, -0)
  62. docs/specification/wire-protocol.rst (+0, -0)
  63. docs/using-tendermint.rst (+4, -4)
  64. glide.lock (+47, -39)
  65. glide.yaml (+7, -9)
  66. mempool/reactor.go (+3, -3)
  67. node/node.go (+116, -41)
  68. node/node_test.go (+6, -2)
  69. p2p/listener.go (+7, -4)
  70. p2p/peer.go (+71, -33)
  71. p2p/peer_set.go (+29, -23)
  72. p2p/peer_set_test.go (+99, -19)
  73. p2p/peer_test.go (+3, -3)
  74. p2p/pex_reactor.go (+9, -9)
  75. p2p/pex_reactor_test.go (+7, -6)
  76. p2p/switch.go (+26, -25)
  77. p2p/switch_test.go (+8, -8)
  78. proxy/app_conn.go (+10, -10)
  79. proxy/app_conn_test.go (+5, -5)
  80. rpc/client/event_test.go (+12, -3)
  81. rpc/client/httpclient.go (+4, -4)
  82. rpc/client/interface.go (+3, -3)
  83. rpc/client/localclient.go (+4, -4)
  84. rpc/client/main_test.go (+3, -3)
  85. rpc/client/mock/abci.go (+2, -1)
  86. rpc/client/mock/client.go (+4, -4)
  87. rpc/client/rpc_test.go (+13, -12)
  88. rpc/core/abci.go (+2, -1)
  89. rpc/core/blocks.go (+22, -4)
  90. rpc/core/consensus.go (+17, -6)
  91. rpc/core/events.go (+2, -2)
  92. rpc/core/net.go (+3, -3)
  93. rpc/core/pipe.go (+4, -1)
  94. rpc/core/routes.go (+1, -1)
  95. rpc/lib/client/http_client.go (+3, -4)
  96. rpc/lib/client/ws_client.go (+4, -4)
  97. rpc/lib/server/handlers.go (+27, -21)
  98. rpc/lib/server/http_server.go (+2, -1)
  99. rpc/lib/types/types.go (+54, -13)
  100. rpc/lib/types/types_test.go (+32, -0)

CHANGELOG.md (+51, -29)

@ -3,36 +3,58 @@
## Roadmap
BREAKING CHANGES:
- Upgrade the header to support better proves on validtors, results, evidence, and possibly more
- Upgrade the header to support better proofs on validators, results, evidence, and possibly more
- Better support for injecting randomness
- Pass evidence/voteInfo through ABCI
- Upgrade consensus for more real-time use of evidence
FEATURES:
- Peer reputation management
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Improved support for querying history and state
- Add authentication and rate-limiting to the RPC
IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
- cache block parts for faster agreement after round changes
- propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection
BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
## 0.10.4 (Septemeber 5, 2017)
## 0.11.0 (September 22, 2017)
BREAKING:
- state: every validator set change is persisted to disk, which required some changes to the `State` structure.
- cmd: if there is no genesis, exit immediately instead of waiting around for one to show.
- p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct).
- types: `Signer.Sign` returns an error (see the sketch below).
- rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones:
- requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC response.
- `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`.
- abci: Info, BeginBlock, InitChain all take structs
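A minimal sketch of the new `Signer` call pattern: only the error return and the `Sign(SignBytes(...))` call shape are confirmed by this release (see the `consensus/byzantine_test.go` diff below); the `Signature` and `Vote` stand-ins here are hypothetical simplifications of the real `types` definitions.
```go
package sketch

import "fmt"

// Hypothetical stand-ins for the real types.Signature and types.Vote.
type Signature []byte

type Vote struct {
	Signature Signature
}

// Signer mirrors the post-0.11.0 shape described above: Sign now
// returns an error alongside the signature.
type Signer interface {
	Sign(msg []byte) (Signature, error)
}

// signVote shows the new call pattern: check and propagate the signing
// error instead of assuming signing always succeeds.
func signVote(s Signer, signBytes []byte, vote *Vote) error {
	sig, err := s.Sign(signBytes)
	if err != nil {
		return fmt.Errorf("failed to sign vote: %v", err)
	}
	vote.Signature = sig
	return nil
}
```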
FEATURES:
- rpc: `/validators?height=X` allows querying of validators at previous heights.
- rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height.
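For example, a height-parameterized validator query over plain HTTP might look like the following sketch; the `/validators?height=X` endpoint is from the notes above, while the RPC address `localhost:46657` is an assumed default.
```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Ask for the validator set at height 10; per the note above,
	// dropping ?height=... returns the set at the latest height.
	resp, err := http.Get("http://localhost:46657/validators?height=10")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(string(body)) // JSONRPC response containing the validators
}
```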
IMPROVEMENTS:
- docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io`
BUG FIXES:
- fix WAL opening issue on Windows
## 0.10.4 (September 5, 2017)
IMPROVEMENTS:
- docs: Added Slate docs to each rpc function (see rpc/core)
- docs: Ported all website docs to Read The Docs
- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize
- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong
@ -72,7 +94,7 @@ IMPROVEMENTS:
FEATURES:
- Use `--trace` to get stack traces for logged errors
- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set
- types: GenesisDocFromFile parses a GenesisDoc from a JSON file
IMPROVEMENTS:
@ -96,7 +118,7 @@ Also includes the Grand Repo-Merge of 2017.
BREAKING CHANGES:
- Config and Flags:
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
- This affects the following flags:
- `--seeds` is now `--p2p.seeds`
@ -109,16 +131,16 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
```
[p2p]
laddr="tcp://1.2.3.4:46656"
[consensus]
timeout_propose=1000
```
- Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test`
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate new config
- Logger
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
@ -161,7 +183,7 @@ IMPROVEMENTS:
- Limit `/blockchain_info` call to return a maximum of 20 blocks
- Use `.Wrap()` and `.Unwrap()` instead of eg. `PubKeyS` for `go-crypto` types
- RPC JSON responses use pretty printing (via `json.MarshalIndent`)
- Color code different instances of the consensus for tests
- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests
@ -189,7 +211,7 @@ IMPROVEMENTS:
- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0)
- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks
BUG FIXES:
- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later
- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save()
@ -205,7 +227,7 @@ message RequestQuery{
bytes data = 1;
string path = 2;
uint64 height = 3;
bool prove = 4;
}
message ResponseQuery{
@ -229,7 +251,7 @@ type BlockMeta struct {
}
```
- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
- `tendermint gen_validator` command output is now pure JSON
@ -272,7 +294,7 @@ type BlockID struct {
}
```
- `Vote` data type now includes validator address and index:
```
type Vote struct {
@ -292,7 +314,7 @@ type Vote struct {
FEATURES:
- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23,
in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts:
```
@ -315,7 +337,7 @@ IMPROVEMENTS:
- Less verbose logging
- Better test coverage (37% -> 49%)
- Canonical SignBytes for signable types
- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile
- Better in-process testing for the consensus reactor and byzantine faults
- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points
- Better abstraction over timeout mechanics
@ -395,7 +417,7 @@ FEATURES:
- TMSP and RPC support TCP and UNIX sockets
- Additional config options including block size and consensus parameters
- New WAL mode `cswal_light`; logs only the validator's own votes
- New RPC endpoints:
- for starting/stopping profilers, and for updating config
- `/broadcast_tx_commit`, returns when tx is included in a block, else an error
- `/unsafe_flush_mempool`, empties the mempool
@ -416,14 +438,14 @@ BUG FIXES:
Strict versioning only began with the release of v0.7.0, in late summer 2016.
The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year.
Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine.
That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow).
In the later half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation.
By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP).
The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine
driving an application running in another process.
The ABCI interface and implementation were iterated on and improved over the course of 2016,
until versioned history kicked in with v0.7.0.

CONTRIBUTING.md (+1, -1)

@ -1,6 +1,6 @@
# Contributing
Thank you for considering making contributions to Tendermint and related repositories (Basecoin, Merkleeyes, etc.)!
Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!


README.md (+54, -26)

@ -13,22 +13,17 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
Branch | Tests | Coverage | Report Card
----------|-------|----------|-------------
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/develop)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/master)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
Branch | Tests | Coverage
----------|-------|----------
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
_NOTE: This is alpha software. Please contact us if you intend to run it in production._
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For more background, see the [introduction](https://tendermint.com/intro).
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
For more information, from introduction to install to application development, [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master).
## Install
@ -38,39 +33,72 @@ To install from source, you should be able to:
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
For more details (or if it fails), see the [install guide](https://tendermint.com/docs/guides/install-from-source).
For more details (or if it fails), [read the docs](http://tendermint.readthedocs.io/projects/tools/en/master/install.html).
## Resources
### Tendermint Core
- [Introduction](https://tendermint.com/intro)
- [Docs](https://tendermint.com/docs)
- [Software using Tendermint](https://tendermint.com/ecosystem)
All resources involving the use of, building applications on, or developing for Tendermint can be found at [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master). Additional information about some - and eventually all - of the sub-projects below can be found at Read The Docs.
### Sub-projects
* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface
* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries
* [Merkleeyes](http://github.com/tendermint/merkleeyes), a balanced, binary Merkle tree for ABCI apps
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
### Tools
* [Deployment, Benchmarking, and Monitoring](https://github.com/tendermint/tools)
* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
### Applications
* [Ethermint](http://github.com/tendermint/ethermint): Ethereum on Tendermint
* [Basecoin](http://github.com/tendermint/basecoin), a cryptocurrency application framework
* [Ethermint](http://github.com/tendermint/ethermint): Ethereum on Tendermint
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk): a cryptocurrency application framework
### More
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
* [Cosmos Blog](https://blog.cosmos.network)
* [Original Whitepaper (out-of-date)](http://www.the-blockchain.com/docs/Tendermint%20Consensus%20without%20Mining.pdf)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
## Contributing
Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).
## Versioning
### SemVer
Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0.
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
- types
- rpc/client
- config
- node
Exported objects in these packages that are not covered by the versioning scheme
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time.
Functions, types, and values in any other package may also change at any time.
### Upgrades
In an effort to avoid accumulating technical debt prior to 1.0.0,
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
will work with existing tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain.
However, any bump in the PATCH version should be compatible with existing histories
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
## Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).

blockchain/pool.go (+1, -1)

@ -352,7 +352,7 @@ func (peer *bpPeer) setLogger(l log.Logger) {
func (peer *bpPeer) resetMonitor() {
peer.recvMonitor = flow.New(time.Second, time.Second*40)
var initialValue = float64(minRecvRate) * math.E
initialValue := float64(minRecvRate) * math.E
peer.recvMonitor.SetREMA(initialValue)
}


blockchain/reactor.go (+16, -11)

@ -28,7 +28,6 @@ const (
statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1
maxBlockchainResponseSize = types.MaxBlockSize + 2
)
type consensusReactor interface {
@ -111,20 +110,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
// doing nothing, will try later in `poolRoutine`
}
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
bcR.pool.RemovePeer(peer.Key)
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
bcR.pool.RemovePeer(peer.Key())
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize())
if err != nil {
bcR.Logger.Error("Error decoding message", "err", err)
return
@ -148,7 +147,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
case *bcBlockResponseMessage:
// Got a block.
bcR.pool.AddBlock(src.Key, msg.Block, len(msgBytes))
bcR.pool.AddBlock(src.Key(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage:
// Send peer our state.
queued := src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
@ -157,12 +156,18 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
case *bcStatusResponseMessage:
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.Key, msg.Height)
bcR.pool.SetPeerHeight(src.Key(), msg.Height)
default:
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// maxMsgSize returns the maximum allowable size of a
// message on the blockchain reactor.
func (bcR *BlockchainReactor) maxMsgSize() int {
return bcR.state.Params().BlockSizeParams.MaxBytes + 2
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
@ -221,7 +226,7 @@ FOR_LOOP:
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet(types.DefaultBlockPartSize)
firstParts := first.MakePartSet(bcR.state.Params().BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
@ -290,11 +295,11 @@ var _ = wire.RegisterInterface(
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) {
msgType = bz[0]
n := int(0)
r := bytes.NewReader(bz)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
if err != nil && n != len(bz) {
err = errors.New("DecodeMessage() had bytes left over")
}


cmd/tendermint/commands/gen_validator.go (+4, -6)

@ -9,18 +9,16 @@ import (
"github.com/tendermint/tendermint/types"
)
var genValidatorCmd = &cobra.Command{
// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
Use: "gen_validator",
Short: "Generate new validator keypair",
Run: genValidator,
}
func init() {
RootCmd.AddCommand(genValidatorCmd)
}
func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidator()
privValidator := types.GenPrivValidatorFS("")
privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t")
fmt.Printf(`%v
`, string(privValidatorJSONBytes))


cmd/tendermint/commands/init.go (+5, -9)

@ -9,21 +9,17 @@ import (
cmn "github.com/tendermint/tmlibs/common"
)
var initFilesCmd = &cobra.Command{
// InitFilesCmd initialises a fresh Tendermint Core instance.
var InitFilesCmd = &cobra.Command{
Use: "init",
Short: "Initialize Tendermint",
Run: initFiles,
}
func init() {
RootCmd.AddCommand(initFilesCmd)
}
func initFiles(cmd *cobra.Command, args []string) {
privValFile := config.PrivValidatorFile()
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
privValidator := types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator := types.GenPrivValidatorFS(privValFile)
privValidator.Save()
genFile := config.GenesisFile()
@ -33,8 +29,8 @@ func initFiles(cmd *cobra.Command, args []string) {
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
PubKey: privValidator.PubKey,
Amount: 10,
PubKey: privValidator.GetPubKey(),
Power: 10,
}}
genDoc.SaveAs(genFile)


cmd/tendermint/commands/probe_upnp.go (+2, -5)

@ -9,16 +9,13 @@ import (
"github.com/tendermint/tendermint/p2p/upnp"
)
var probeUpnpCmd = &cobra.Command{
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
var ProbeUpnpCmd = &cobra.Command{
Use: "probe_upnp",
Short: "Test UPnP functionality",
RunE: probeUpnp,
}
func init() {
RootCmd.AddCommand(probeUpnpCmd)
}
func probeUpnp(cmd *cobra.Command, args []string) error {
capabilities, err := upnp.Probe(logger)
if err != nil {


cmd/tendermint/commands/replay.go (+5, -7)

@ -6,7 +6,8 @@ import (
"github.com/tendermint/tendermint/consensus"
)
var replayCmd = &cobra.Command{
// ReplayCmd allows replaying of messages from the WAL.
var ReplayCmd = &cobra.Command{
Use: "replay",
Short: "Replay messages from WAL",
Run: func(cmd *cobra.Command, args []string) {
@ -14,15 +15,12 @@ var replayCmd = &cobra.Command{
},
}
var replayConsoleCmd = &cobra.Command{
// ReplayConsoleCmd allows replaying of messages from the WAL in a
// console.
var ReplayConsoleCmd = &cobra.Command{
Use: "replay_console",
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
},
}
func init() {
RootCmd.AddCommand(replayCmd)
RootCmd.AddCommand(replayConsoleCmd)
}

cmd/tendermint/commands/reset_priv_validator.go (+15, -18)

@ -9,21 +9,27 @@ import (
"github.com/tendermint/tmlibs/log"
)
var resetAllCmd = &cobra.Command{
// ResetAllCmd removes the database of this Tendermint core
// instance.
var ResetAllCmd = &cobra.Command{
Use: "unsafe_reset_all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
Run: resetAll,
}
var resetPrivValidatorCmd = &cobra.Command{
// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",
Short: "(unsafe) Reset this node's validator",
Run: resetPrivValidator,
}
func init() {
RootCmd.AddCommand(resetAllCmd)
RootCmd.AddCommand(resetPrivValidatorCmd)
// ResetAll removes all the data and WAL and resets the privValidator file.
// Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger)
os.RemoveAll(dbDir)
logger.Info("Removed all data", "dir", dbDir)
}
// XXX: this is totally unsafe.
@ -35,26 +41,17 @@ func resetAll(cmd *cobra.Command, args []string) {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorLocal(config.PrivValidatorFile(), logger)
}
// Exported so other CLI tools can use it
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorLocal(privValFile, logger)
os.RemoveAll(dbDir)
logger.Info("Removed all data", "dir", dbDir)
resetPrivValidatorFS(config.PrivValidatorFile(), logger)
}
func resetPrivValidatorLocal(privValFile string, logger log.Logger) {
func resetPrivValidatorFS(privValFile string, logger log.Logger) {
// Get PrivValidator
var privValidator *types.PrivValidator
if _, err := os.Stat(privValFile); err == nil {
privValidator = types.LoadPrivValidator(privValFile)
privValidator := types.LoadPrivValidatorFS(privValFile)
privValidator.Reset()
logger.Info("Reset PrivValidator", "file", privValFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator := types.GenPrivValidatorFS(privValFile)
privValidator.Save()
logger.Info("Generated PrivValidator", "file", privValFile)
}


cmd/tendermint/commands/root.go (+2, -1)

@ -34,11 +34,12 @@ func ParseConfig() (*cfg.Config, error) {
return conf, err
}
// RootCmd is the root command for Tendermint core.
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
if cmd.Name() == versionCmd.Name() {
if cmd.Name() == VersionCmd.Name() {
return nil
}
config, err = ParseConfig()


cmd/tendermint/commands/run_node.go (+27, -49)

@ -2,26 +2,12 @@ package commands
import (
"fmt"
"time"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
nm "github.com/tendermint/tendermint/node"
)
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
RunE: runNode,
}
func init() {
AddNodeFlags(runNodeCmd)
RootCmd.AddCommand(runNodeCmd)
}
// AddNodeFlags exposes some common configuration options on the command-line
// These are exposed for convenience of commands embedding a tendermint node
func AddNodeFlags(cmd *cobra.Command) {
@ -50,40 +36,32 @@ func AddNodeFlags(cmd *cobra.Command) {
cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
}
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func runNode(cmd *cobra.Command, args []string) error {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GenesisFile()
for !cmn.FileExists(genDocFile) {
logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
time.Sleep(time.Second)
// NewRunNodeCmd returns the command that allows the CLI to start a
// node. It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
cmd := &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
RunE: func(cmd *cobra.Command, args []string) error {
// Create & start node
n, err := nodeProvider(config, logger)
if err != nil {
return fmt.Errorf("Failed to create node: %v", err)
}
if _, err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err)
} else {
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
return nil
},
}
genDoc, err := types.GenesisDocFromFile(genDocFile)
if err != nil {
return err
}
config.ChainID = genDoc.ChainID
// Create & start node
n := node.NewNodeDefault(config, logger.With("module", "node"))
if _, err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err)
} else {
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
return nil
AddNodeFlags(cmd)
return cmd
}

cmd/tendermint/commands/show_validator.go (+3, -6)

@ -9,18 +9,15 @@ import (
"github.com/tendermint/tendermint/types"
)
var showValidatorCmd = &cobra.Command{
// ShowValidatorCmd adds capabilities for showing the validator info.
var ShowValidatorCmd = &cobra.Command{
Use: "show_validator",
Short: "Show this node's validator info",
Run: showValidator,
}
func init() {
RootCmd.AddCommand(showValidatorCmd)
}
func showValidator(cmd *cobra.Command, args []string) {
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile())
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
fmt.Println(string(pubKeyJSONBytes))
}

cmd/tendermint/commands/testnet.go (+14, -15)

@ -7,16 +7,10 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
var testnetFilesCmd = &cobra.Command{
Use: "testnet",
Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles,
}
//flags
var (
nValidators int
@ -24,12 +18,18 @@ var (
)
func init() {
testnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
"Number of validators to initialize the testnet with")
testnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
"Directory to store initialization data for the testnet")
RootCmd.AddCommand(testnetFilesCmd)
}
// TestnetFilesCmd allows initialisation of files for a
// Tendermint testnet.
var TestnetFilesCmd = &cobra.Command{
Use: "testnet",
Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles,
}
func testnetFiles(cmd *cobra.Command, args []string) {
@ -45,10 +45,10 @@ func testnetFiles(cmd *cobra.Command, args []string) {
}
// Read priv_validator.json to populate vals
privValFile := path.Join(dataDir, mach, "priv_validator.json")
privVal := types.LoadPrivValidator(privValFile)
privVal := types.LoadPrivValidatorFS(privValFile)
genVals[i] = types.GenesisValidator{
PubKey: privVal.PubKey,
Amount: 1,
PubKey: privVal.GetPubKey(),
Power: 1,
Name: mach,
}
}
@ -87,7 +87,6 @@ func ensurePrivValidator(file string) {
if cmn.FileExists(file) {
return
}
privValidator := types.GenPrivValidator()
privValidator.SetFile(file)
privValidator := types.GenPrivValidatorFS(file)
privValidator.Save()
}

cmd/tendermint/commands/version.go (+2, -5)

@ -8,14 +8,11 @@ import (
"github.com/tendermint/tendermint/version"
)
var versionCmd = &cobra.Command{
// VersionCmd prints the version of Tendermint.
var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version.Version)
},
}
func init() {
RootCmd.AddCommand(versionCmd)
}

cmd/tendermint/main.go (+30, -2)

@ -3,11 +3,39 @@ package main
import (
"os"
"github.com/tendermint/tendermint/cmd/tendermint/commands"
"github.com/tendermint/tmlibs/cli"
cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
nm "github.com/tendermint/tendermint/node"
)
func main() {
cmd := cli.PrepareBaseCmd(commands.RootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.ReplayCmd,
cmd.ReplayConsoleCmd,
cmd.ResetAllCmd,
cmd.ResetPrivValidatorCmd,
cmd.ShowValidatorCmd,
cmd.TestnetFilesCmd,
cmd.VersionCmd)
// NOTE:
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// * Supply a genesis doc file from another source
// * Provide their own DB implementation
// can copy this file and use something other than the
// DefaultNewNode function
nodeFunc := nm.DefaultNewNode
// Create & start node
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
cmd.Execute()
}
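To make the NOTE above concrete, here is a hedged sketch of a customized main: the `nm.NodeProvider` shape is inferred from `NewRunNodeCmd(nodeProvider nm.NodeProvider)` and the `n, err := nodeProvider(config, logger)` call in run_node.go above, the import paths are assumed to match the file, and `myNodeProvider` with its body is a placeholder, not part of this commit.
```go
package main

import (
	"os"

	cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
	cfg "github.com/tendermint/tendermint/config"
	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tmlibs/cli"
	"github.com/tendermint/tmlibs/log"
)

// myNodeProvider matches the nm.NodeProvider shape used above. Here it
// simply delegates to the default constructor; a custom build would call
// nm.NewNode with its own PrivValidator, proxy.ClientCreator, genesis
// doc source, or DB implementation.
func myNodeProvider(config *cfg.Config, logger log.Logger) (*nm.Node, error) {
	return nm.DefaultNewNode(config, logger)
}

func main() {
	rootCmd := cmd.RootCmd
	rootCmd.AddCommand(cmd.NewRunNodeCmd(myNodeProvider))
	cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint")).Execute()
}
```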

config/config.go (+0, -7)

@ -4,8 +4,6 @@ import (
"fmt"
"path/filepath"
"time"
"github.com/tendermint/tendermint/types" // TODO: remove
)
// Config defines the top level configuration for a Tendermint node
@ -320,10 +318,6 @@ type ConsensusConfig struct {
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"`
// TODO: This probably shouldn't be exposed but it makes it
// easy to write tests for the wal/replay
BlockPartSize int `mapstructure:"block_part_size"`
// Reactor sleep duration parameters are in ms
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
@ -386,7 +380,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
MaxBlockSizeBytes: 1, // TODO
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0,
BlockPartSize: types.DefaultBlockPartSize, // TODO: we shouldnt be importing types
PeerGossipSleepDuration: 100,
PeerQueryMaj23SleepDuration: 2000,
}


config/toml.go (+1, -1)

@ -119,7 +119,7 @@ var testGenesis = `{
"type": "ed25519",
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
},
"amount": 10,
"power": 10,
"name": ""
}
],


consensus/byzantine_test.go (+27, -34)

@ -5,6 +5,8 @@ import (
"testing"
"time"
crypto "github.com/tendermint/go-crypto"
data "github.com/tendermint/go-wire/data"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
@ -53,7 +55,7 @@ func TestByzantine(t *testing.T) {
eventLogger := logger.With("module", "events")
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
// make byzantine
css[i].decideProposal = func(j int) func(int, int) {
return func(height, round int) {
@ -188,7 +190,7 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS
}
}
func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
func sendProposalAndParts(height, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
@ -231,14 +233,14 @@ func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
if !br.reactor.IsRunning() {
return
}
// Create peerState for peer
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
peer.Set(types.PeerStateKey, peerState)
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
@ -246,10 +248,10 @@ func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
br.reactor.sendNewRoundStepMessages(peer)
}
}
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
@ -257,51 +259,42 @@ func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte)
// byzantine privValidator
type ByzantinePrivValidator struct {
Address []byte `json:"address"`
types.Signer `json:"-"`
types.Signer
mtx sync.Mutex
pv types.PrivValidator
}
// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator {
func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
return &ByzantinePrivValidator{
Address: pv.Address,
Signer: pv.Signer,
Signer: pv.(*types.PrivValidatorFS).Signer,
pv: pv,
}
}
func (privVal *ByzantinePrivValidator) GetAddress() []byte {
return privVal.Address
func (privVal *ByzantinePrivValidator) GetAddress() data.Bytes {
return privVal.pv.GetAddress()
}
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
vote.Signature = privVal.Sign(types.SignBytes(chainID, vote))
return nil
func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
return privVal.pv.GetPubKey()
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
vote.Signature, err = privVal.Sign(types.SignBytes(chainID, vote))
return err
}
// Sign
proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal))
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
proposal.Signature, err = privVal.Sign(types.SignBytes(chainID, proposal))
return nil
}
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
heartbeat.Signature = privVal.Sign(types.SignBytes(chainID, heartbeat))
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
heartbeat.Signature, err = privVal.Sign(types.SignBytes(chainID, heartbeat))
return nil
}
func (privVal *ByzantinePrivValidator) String() string {
return Fmt("PrivValidator{%X}", privVal.Address)
return Fmt("PrivValidator{%X}", privVal.GetAddress())
}

consensus/common_test.go (+22, -23)

@ -50,12 +50,12 @@ type validatorStub struct {
Index int // Validator index. NOTE: we don't assume validator set changes.
Height int
Round int
*types.PrivValidator
types.PrivValidator
}
var testMinPower = 10
func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validatorStub {
func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub {
return &validatorStub{
Index: valIndex,
PrivValidator: privValidator,
@ -65,7 +65,7 @@ func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validat
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.Address,
ValidatorAddress: vs.PrivValidator.GetAddress(),
Height: vs.Height,
Round: vs.Round,
Type: voteType,
@ -142,7 +142,7 @@ func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.P
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
prevotes := cs.Votes.Prevotes(round)
var vote *types.Vote
if vote = prevotes.GetByAddress(privVal.Address); vote == nil {
if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find prevote from validator")
}
if blockHash == nil {
@ -159,7 +159,7 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
votes := cs.LastCommit
var vote *types.Vote
if vote = votes.GetByAddress(privVal.Address); vote == nil {
if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find precommit from validator")
}
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
@ -170,7 +170,7 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
precommits := cs.Votes.Precommits(thisRound)
var vote *types.Vote
if vote = precommits.GetByAddress(privVal.Address); vote == nil {
if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find precommit from validator")
}
@ -225,11 +225,11 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
//-------------------------------------------------------------------------------
// consensus states
func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
@ -258,17 +258,17 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *ty
return cs
}
func loadPrivValidator(config *cfg.Config) *types.PrivValidator {
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidator(privValidatorFile, log.TestingLogger())
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
privValidator.Reset()
return privValidator
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
@ -338,7 +338,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state, _ := sm.MakeGenesisState(db, genDoc)
state.SetLogger(logger.With("module", "state", "validator", i))
state.Save()
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
@ -359,18 +359,17 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
css := make([]*ConsensusState, nPeers)
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state, _ := sm.MakeGenesisState(db, genDoc)
state.SetLogger(log.TestingLogger().With("module", "state"))
state.Save()
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal *types.PrivValidator
var privVal types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
privVal = types.GenPrivValidator()
_, tempFilePath := Tempfile("priv_validator_")
privVal.SetFile(tempFilePath)
privVal = types.GenPrivValidatorFS(tempFilePath)
}
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
@ -380,9 +379,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
return css
}
func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches {
if bytes.Equal(peer.NodeInfo.PubKey.Address(), s.NodeInfo().PubKey.Address()) {
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) {
return i
}
}
@ -393,14 +392,14 @@ func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
//-------------------------------------------------------------------------------
// genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) {
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidator, numValidators)
privValidators := make([]*types.PrivValidatorFS, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Amount: val.VotingPower,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
@ -412,10 +411,10 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
}, privValidators
}
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidatorFS) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
db := dbm.NewMemDB()
s0 := sm.MakeGenesisState(db, genDoc)
s0, _ := sm.MakeGenesisState(db, genDoc)
s0.SetLogger(log.TestingLogger().With("module", "state"))
s0.Save()
return s0, privValidators


consensus/height_vote_set_test.go (+2, -2)

@ -44,10 +44,10 @@ func TestPeerCatchupRounds(t *testing.T) {
}
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator, valIndex int) *types.Vote {
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
ValidatorAddress: privVal.Address,
ValidatorAddress: privVal.GetAddress(),
ValidatorIndex: valIndex,
Height: height,
Round: round,


consensus/mempool_test.go (+1, -1)

@ -183,7 +183,7 @@ func NewCounterApplication() *CounterApplication {
return &CounterApplication{}
}
func (app *CounterApplication) Info() abci.ResponseInfo {
func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
}


consensus/reactor.go (+30, -23)

@ -120,14 +120,14 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// AddPeer implements Reactor
func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
if !conR.IsRunning() {
return
}
// Create peerState for peer
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
// Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState)
@ -142,12 +142,12 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
}
// RemovePeer implements Reactor
func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
if !conR.IsRunning() {
return
}
// TODO
//peer.Data.Get(PeerStateKey).(*PeerState).Disconnect()
//peer.Get(PeerStateKey).(*PeerState).Disconnect()
}
// Receive implements Reactor
@ -156,7 +156,7 @@ func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
@ -171,7 +171,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
// Get peer states
ps := src.Data.Get(types.PeerStateKey).(*PeerState)
ps := src.Get(types.PeerStateKey).(*PeerState)
switch chID {
case StateChannel:
@ -191,7 +191,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
return
}
// Peer claims to have a maj23 for some BlockID at H,R,S,
votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key, msg.BlockID)
votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key(), msg.BlockID)
// Respond with a VoteSetBitsMessage showing which votes we have.
// (and consequently shows which we don't have)
var ourVotes *cmn.BitArray
@ -228,12 +228,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
switch msg := msg.(type) {
case *ProposalMessage:
ps.SetHasProposal(msg.Proposal)
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key()}
case *ProposalPOLMessage:
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key()}
default:
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
@ -253,7 +253,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)
cs.peerMsgQueue <- msgInfo{msg, src.Key}
cs.peerMsgQueue <- msgInfo{msg, src.Key()}
default:
// don't punish (leave room for soft upgrades)
@ -367,7 +367,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Data.Get(PeerStateKey).(*PeerState)
ps := peer.Get(PeerStateKey).(*PeerState)
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
@ -399,7 +399,7 @@ func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *
return
}
func (conR *ConsensusReactor) sendNewRoundStepMessages(peer *p2p.Peer) {
func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
@ -410,7 +410,7 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer *p2p.Peer) {
}
}
func (conR *ConsensusReactor) gossipDataRoutine(peer *p2p.Peer, ps *PeerState) {
func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@ -492,7 +492,7 @@ OUTER_LOOP:
}
func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *RoundState,
prs *PeerRoundState, ps *PeerState, peer *p2p.Peer) {
prs *PeerRoundState, ps *PeerState, peer p2p.Peer) {
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeader is correct
@ -534,7 +534,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *RoundS
}
}
func (conR *ConsensusReactor) gossipVotesRoutine(peer *p2p.Peer, ps *PeerState) {
func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
// Simple hack to throttle logs upon sleep.
@ -644,7 +644,7 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *RoundS
// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer *p2p.Peer, ps *PeerState) {
func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@ -743,7 +743,7 @@ func (conR *ConsensusReactor) StringIndented(indent string) string {
s := "ConsensusReactor{\n"
s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Data.Get(types.PeerStateKey).(*PeerState)
ps := peer.Get(types.PeerStateKey).(*PeerState)
s += indent + " " + ps.StringIndented(indent+" ") + "\n"
}
s += indent + "}"
@ -808,16 +808,18 @@ var (
// PeerState contains the known state of a peer, including its connection
// and threadsafe access to its PeerRoundState.
type PeerState struct {
Peer *p2p.Peer
Peer p2p.Peer
logger log.Logger
mtx sync.Mutex
PeerRoundState
}
// NewPeerState returns a new PeerState for the given Peer
func NewPeerState(peer *p2p.Peer) *PeerState {
func NewPeerState(peer p2p.Peer) *PeerState {
return &PeerState{
Peer: peer,
Peer: peer,
logger: log.NewNopLogger(),
PeerRoundState: PeerRoundState{
Round: -1,
ProposalPOLRound: -1,
@ -827,6 +829,11 @@ func NewPeerState(peer *p2p.Peer) *PeerState {
}
}
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
ps.logger = logger
return ps
}
// GetRoundState returns an atomic snapshot of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *PeerRoundState {
@ -1025,7 +1032,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
}
func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
logger := ps.Peer.Logger.With("peerRound", ps.Round, "height", height, "round", round)
logger := ps.logger.With("peerRound", ps.Round, "height", height, "round", round)
logger.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
@ -1163,7 +1170,7 @@ func (ps *PeerState) StringIndented(indent string) string {
%s Key %v
%s PRS %v
%s}`,
indent, ps.Peer.Key,
indent, ps.Peer.Key(),
indent, ps.PeerRoundState.StringIndented(indent+" "),
indent)
}
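
The through-line in this file's changes is that the reactor now programs against a `p2p.Peer` interface instead of the concrete `*p2p.Peer` struct: the old `peer.Data` KV store is reached through `Set`/`Get` methods, and `Key` becomes an accessor. A minimal sketch of the interface shape these call sites imply (hypothetical; the real `p2p.Peer` interface carries more methods, e.g. for sending messages):

```
// Peer, reduced to the methods this reactor actually exercises:
// a stable identifier plus a small per-peer key-value store.
type Peer interface {
	Key() string                      // unique peer identifier
	Set(key string, data interface{}) // attach reactor-local state, e.g. *PeerState
	Get(key string) interface{}      // retrieve reactor-local state
}
```

Programming against an interface lets tests substitute lightweight peer mocks and keeps reactor-local state like `*PeerState` out of the peer implementation itself.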


+ 5
- 5
consensus/reactor_test.go View File

@ -132,7 +132,7 @@ func TestVotingPowerChange(t *testing.T) {
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing changing the voting power of one validator a few times")
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
val1PubKey := css[0].privValidator.GetPubKey()
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
@ -193,7 +193,7 @@ func TestValidatorSetChanges(t *testing.T) {
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing adding one validator")
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
// wait till everyone makes block 2
@ -219,7 +219,7 @@ func TestValidatorSetChanges(t *testing.T) {
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing changing the voting power of one validator")
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
@ -235,10 +235,10 @@ func TestValidatorSetChanges(t *testing.T) {
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing adding two validators at once")
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)


+ 7
- 25
consensus/replay.go View File

@ -19,6 +19,7 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
// Functionality to replay blocks and messages on recovery from a crash.
@ -115,35 +116,13 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1))
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
}
} else if err != nil {
return err
} else {
defer gr.Close()
}
if !found {
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, _, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
} else {
defer gr.Close()
}
// TODO (0.10.0): uncomment
// return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
}
cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
@ -221,7 +200,7 @@ func (h *Handshaker) NBlocks() int {
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
if err != nil {
return errors.New(cmn.Fmt("Error calling Info: %v", err))
}
@ -257,7 +236,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(h.state.Validators)
proxyApp.Consensus().InitChainSync(validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
}
// First handle edge cases and constraints on the storeBlockHeight
@ -324,8 +303,11 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) {
// App is further behind than it should be, so we need to replay blocks.
// We replay all blocks from appBlockHeight+1.
//
// Note that we don't have an old version of the state,
// so we by-pass state validation/mutation using sm.ExecCommitBlock.
// This also means we won't be saving validator sets if they change during this period.
//
// If mutateState == true, the final block is replayed with h.replayBlock()
var appHash []byte


+ 5
- 2
consensus/replay_file.go View File

@ -241,12 +241,15 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
// Get State
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state, err := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
if err != nil {
cmn.Exit(err.Error())
}
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore))
_, err := proxyApp.Start()
_, err = proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
}


+ 18
- 18
consensus/replay_test.go View File

@ -13,6 +13,7 @@ import (
"time"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
@ -162,8 +163,8 @@ LOOP:
cs.Wait()
}
func toPV(pv PrivValidator) *types.PrivValidator {
return pv.(*types.PrivValidator)
func toPV(pv types.PrivValidator) *types.PrivValidatorFS {
return pv.(*types.PrivValidatorFS)
}
func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
@ -267,8 +268,6 @@ func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum in
var (
NUM_BLOCKS = 6 // number of blocks in the test_data/many_blocks.cswal
mempool = types.MockMempool{}
testPartSize int
)
//---------------------------------------
@ -319,8 +318,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
walFile := writeWAL(string(walBody))
config.Consensus.SetWalFile(walFile)
privVal := types.LoadPrivValidator(config.PrivValidatorFile())
testPartSize = config.Consensus.BlockPartSize
privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile())
wal, err := NewWAL(walFile, false)
if err != nil {
@ -335,7 +333,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
t.Fatalf(err.Error())
}
state, store := stateAndStore(config, privVal.PubKey)
state, store := stateAndStore(config, privVal.GetPubKey())
store.chain = chain
store.commits = commits
@ -349,7 +347,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, nil)
state, _ := stateAndStore(config, privVal.PubKey)
state, _ := stateAndStore(config, privVal.GetPubKey())
buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode)
}
@ -361,7 +359,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync()
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""})
if err != nil {
t.Fatal(err)
}
@ -384,6 +382,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
}
func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
testPartSize := st.Params().BlockPartSizeBytes
err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
panic(err)
@ -398,7 +397,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns,
}
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
defer proxyApp.Stop()
switch mode {
@ -432,7 +431,7 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
var latestAppHash []byte
@ -503,7 +502,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
// if its not the first one, we have a full block
if blockParts != nil {
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), 0, &n, &err).(*types.Block)
blocks = append(blocks, block)
}
blockParts = types.NewPartSetFromHeader(*p)
@ -524,7 +523,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
}
// grab the last block too
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), 0, &n, &err).(*types.Block)
blocks = append(blocks, block)
return blocks, commits, nil
}
@ -560,10 +559,10 @@ func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
// fresh state and mock store
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
store := NewMockBlockStore(config)
store := NewMockBlockStore(config, state.Params())
return state, store
}
@ -572,13 +571,14 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBl
type mockBlockStore struct {
config *cfg.Config
params types.ConsensusParams
chain []*types.Block
commits []*types.Commit
}
// TODO: NewBlockStore(db.NewMemDB) ...
func NewMockBlockStore(config *cfg.Config) *mockBlockStore {
return &mockBlockStore{config, nil, nil}
func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
return &mockBlockStore{config, params, nil, nil}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
@ -586,7 +586,7 @@ func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[h
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.Consensus.BlockPartSize).Header()},
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}


+ 8
- 14
consensus/state.go View File

@ -4,7 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"path"
"path/filepath"
"reflect"
"sync"
"time"
@ -180,14 +180,6 @@ func (ti *timeoutInfo) String() string {
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step)
}
// PrivValidator is a validator that can sign votes and proposals.
type PrivValidator interface {
GetAddress() []byte
SignVote(chainID string, vote *types.Vote) error
SignProposal(chainID string, proposal *types.Proposal) error
SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error
}
// ConsensusState handles execution of the consensus algorithm.
// It processes votes and proposals, and upon reaching agreement,
// commits blocks to the chain and executes them against the application.
@ -197,7 +189,7 @@ type ConsensusState struct {
// config details
config *cfg.ConsensusConfig
privValidator PrivValidator // for signing votes
privValidator types.PrivValidator // for signing votes
// services for creating and executing blocks
proxyAppConn proxy.AppConnConsensus
@ -308,7 +300,7 @@ func (cs *ConsensusState) GetValidators() (int, []*types.Validator) {
}
// SetPrivValidator sets the private validator account for signing votes.
func (cs *ConsensusState) SetPrivValidator(priv PrivValidator) {
func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.privValidator = priv
@ -394,7 +386,7 @@ func (cs *ConsensusState) Wait() {
// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
err = cmn.EnsureDir(path.Dir(walFile), 0700)
err = cmn.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
cs.Logger.Error("Error ensuring ConsensusState wal dir", "err", err.Error())
return err
@ -983,7 +975,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.BlockPartSize)
cs.state.LastBlockID, cs.state.Validators.Hash(),
cs.state.AppHash, cs.state.Params().BlockPartSizeBytes)
}
// Enter: `timeoutPropose` after entering Propose.
@ -1417,7 +1410,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver
// Added and completed!
var n int
var err error
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(),
cs.state.Params().BlockSizeParams.MaxBytes, &n, &err).(*types.Block)
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
if cs.Step == RoundStepPropose && cs.isProposalComplete() {


+ 9
- 11
consensus/state_test.go View File

@ -79,7 +79,7 @@ func TestProposerSelection0(t *testing.T) {
<-newRoundCh
prop = cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[1].Address) {
if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
panic(Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
}
}
@ -100,7 +100,7 @@ func TestProposerSelection2(t *testing.T) {
// everyone just votes nil. we get a new proposer each round
for i := 0; i < len(vss); i++ {
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].Address) {
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
panic(Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
}
@ -180,7 +180,7 @@ func TestBadProposal(t *testing.T) {
height, round := cs1.Height, cs1.Round
vs2 := vss[1]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@ -327,7 +327,7 @@ func TestLockNoPOL(t *testing.T) {
vs2 := vss[1]
height := cs1.Height
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@ -493,7 +493,7 @@ func TestLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@ -502,8 +502,6 @@ func TestLockPOLRelock(t *testing.T) {
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
t.Logf("vs2 last round %v", vs2.PrivValidator.LastRound)
// everything done from perspective of cs1
/*
@ -608,7 +606,7 @@ func TestLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@ -703,7 +701,7 @@ func TestLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@ -824,7 +822,7 @@ func TestLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@ -999,7 +997,7 @@ func TestHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.Consensus.BlockPartSize
partSize := cs1.state.Params().BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)


+ 2
- 2
docs/abci-cli.rst View File

@ -1,5 +1,5 @@
Using the abci-cli
==================
Using ABCI-CLI
==============
To facilitate testing and debugging of ABCI servers and simple apps, we
built a CLI, the ``abci-cli``, for sending ABCI messages from the


+ 1
- 1
docs/app-architecture.rst View File

@ -58,7 +58,7 @@ Tendermint Core RPC
The concept is that the ABCI app is completely hidden from the outside
world and only communicates through a tested and secured `interface
exposed by the tendermint core <./rpc.html>`__. This interface
exposed by the tendermint core <./specification/rpc.html>`__. This interface
exposes a lot of data on the block header and consensus process, which
is quite useful for externally verifying the system. It also includes
3(!) methods to broadcast a transaction (propose it for the blockchain,


+ 0
- 16
docs/architecture/ABCI.md View File

@ -1,16 +0,0 @@
# ABCI
ABCI is an interface between the consensus/blockchain engine known as tendermint, and the application-specific business logic, known as an ABCI app.
The tendermint core should run unchanged for all apps. Each app can customize the supported transactions, queries, even the validator sets and how staking/slashing is handled. This customization is achieved by implementing the ABCI app to send the proper information to the tendermint engine to perform as directed.
To understand this decision better, think of the design of the tendermint engine.
* A blockchain is simply consensus on a unique global ordering of events.
* This consensus can efficiently be implemented using BFT and PoS
* This code can be generalized to easily support a large number of blockchains
* The blockchain-specific code, the interpretation of the individual events, can be implemented by a third-party app without touching the consensus engine core
* Use an efficient, language-agnostic layer to implement this (ABCI)
Bucky, please make this doc real.

+ 1
- 12
docs/architecture/README.md View File

@ -2,15 +2,4 @@
This is a location to record all high-level architecture decisions in the tendermint project: not the implementation details, but the reasoning that happened. This should be referred to for guidance on the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
This is like our guide and mentor when Jae and Bucky are offline.... The concept comes from a [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t) that resonated among the team when Anton shared it.
Each section of the code can have its own markdown file in this directory; please add a link to it in the readme.
## Sections
* [ABCI](./ABCI.md)
* [go-merkle / merkleeyes](./merkle.md)
* [Frey's thoughts on the data store](./merkle-frey.md)
* basecoin
* tendermint core (multiple sections)
* ???
Read up on the concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).

docs/architecture/adr-001.md → docs/architecture/adr-001-logging.md View File


docs/architecture/adr-002.md → docs/architecture/adr-002-event-subscription.md View File


docs/architecture/adr-003.md → docs/architecture/adr-003-abci-app-rpc.md View File


+ 38
- 0
docs/architecture/adr-004-historical-validators.md View File

@ -0,0 +1,38 @@
# ADR 004: Historical Validators
## Context
Right now, we can query the present validator set, but there is no history.
If you were offline for a long time, there is no way to reconstruct past validator sets. This is needed for the light client, and we agreed it requires an enhancement of the API.
## Decision
For every block, store a new structure that contains either the latest validator set,
or the height of the last block for which the validator set changed. Note this is not
the height of the block that returned the validator set change itself, but the next block,
i.e. the first block for which it comes into effect.
Storing the validators will be handled by the `state` package.
At some point in the future, we may consider more efficient storage in the case where the validators
are updated frequently - for instance by only saving the diffs, rather than the whole set.
An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree.
While it might afford cheaper proofs that a validator set has not changed, it would be more complex,
and likely less efficient.
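
As a rough sketch, the per-height record this decision implies might look like the following (field names are illustrative, not the final `state` package API):

```
// ValidatorsInfo is saved by the state package for every block height.
// It holds either the full validator set (when it changed at this
// height) or a pointer back to the height where it last changed.
type ValidatorsInfo struct {
	ValidatorSet      *types.ValidatorSet // nil if unchanged at this height
	LastHeightChanged int                 // first height this set took effect
}
```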
## Status
Accepted.
## Consequences
### Positive
- Can query old validator sets, with proof.
### Negative
- Writes an extra structure to disk with every block.
### Neutral

+ 85
- 0
docs/architecture/adr-005-consensus-params.md View File

@ -0,0 +1,85 @@
# ADR 005: Consensus Params
## Context
Consensus-critical parameters controlling blockchain capacity have until now been hard-coded, loaded from a local config, or neglected.
Since they may need to differ between networks, and potentially evolve over time within
a network, we seek to initialize them in a genesis file and expose them through the ABCI.
While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future,
such as a period over which evidence is valid, or the frequency of checkpoints.
## Decision
### ConsensusParams
No consensus critical parameters should ever be found in the `config.toml`.
A new `ConsensusParams` is optionally included in the `genesis.json` file,
and loaded into the `State`. Any items not included are set to their default value.
A value of 0 is undefined (see ABCI, below). A value of -1 is used to indicate the parameter does not apply.
The parameters are used to determine the validity of a block (and tx) via the union of all relevant parameters.
```
type ConsensusParams struct {
BlockSizeParams
TxSizeParams
BlockGossipParams
}
type BlockSizeParams struct {
MaxBytes int
MaxTxs int
MaxGas int
}
type TxSizeParams struct {
MaxBytes int
MaxGas int
}
type BlockGossipParams struct {
BlockPartSizeBytes int
}
```
The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules.
The `BlockPartSizeBytes` and the `BlockSizeParams.MaxBytes` are enforced to be greater than 0.
The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks.
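
A sketch of that check, as a hypothetical method on the struct above (the concrete validation would live wherever the genesis is loaded into the `State`):

```
// Validate enforces the two invariants described above; all other
// parameters may take their defaults.
func (params ConsensusParams) Validate() error {
	if params.BlockSizeParams.MaxBytes <= 0 {
		return errors.New("BlockSizeParams.MaxBytes must be greater than 0")
	}
	if params.BlockGossipParams.BlockPartSizeBytes <= 0 {
		return errors.New("BlockGossipParams.BlockPartSizeBytes must be greater than 0")
	}
	return nil
}
```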
### ABCI
#### InitChain
InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams.
There is some case to be made for it to take the entire Genesis, except there may be things in the genesis,
like the BlockPartSize, that the app shouldn't really know about.
#### EndBlock
The EndBlock response includes a `ConsensusParams`, which includes BlockSizeParams and TxSizeParams, but not BlockGossipParams.
Other param structs can be added to `ConsensusParams` in the future.
The `0` value is used to denote no change.
Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block.
Tendermint should have hard-coded upper limits as sanity checks.
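
A sketch of the zero-means-no-change update rule (illustrative only; the EndBlock response would carry just the updatable structs):

```
// applyUpdates copies non-zero fields from the EndBlock response onto
// the current params; zero values leave the existing parameter intact.
func applyUpdates(params, updates ConsensusParams) ConsensusParams {
	if updates.BlockSizeParams.MaxBytes != 0 {
		params.BlockSizeParams.MaxBytes = updates.BlockSizeParams.MaxBytes
	}
	if updates.BlockSizeParams.MaxTxs != 0 {
		params.BlockSizeParams.MaxTxs = updates.BlockSizeParams.MaxTxs
	}
	if updates.BlockSizeParams.MaxGas != 0 {
		params.BlockSizeParams.MaxGas = updates.BlockSizeParams.MaxGas
	}
	if updates.TxSizeParams.MaxBytes != 0 {
		params.TxSizeParams.MaxBytes = updates.TxSizeParams.MaxBytes
	}
	if updates.TxSizeParams.MaxGas != 0 {
		params.TxSizeParams.MaxGas = updates.TxSizeParams.MaxGas
	}
	return params
}
```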
## Status
Proposed.
## Consequences
### Positive
- Alternative capacity limits and consensus parameters can be specified without re-compiling the software.
- They can also change over time under the control of the application
### Negative
- More exposed parameters means more complexity
- Different rules at different heights in the blockchain complicate fast sync
### Neutral
- The TxSizeParams, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes

+ 16
- 0
docs/architecture/adr-template.md View File

@ -0,0 +1,16 @@
# ADR 000: Template for an ADR
## Context
## Decision
## Status
## Consequences
### Positive
### Negative
### Neutral

+ 0
- 240
docs/architecture/merkle-frey.md View File

@ -1,240 +0,0 @@
# Merkle data stores - Frey's proposal
## TL;DR
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an app hash to the consensus engine, as well as a full proof to any client.
This is equivalent to building a database, and I would propose designing it from the API first, then looking at how to implement it (or making an adapter from the API to existing implementations). Once we agree on the functionality and the interface, we can implement the API bindings, and then work on building adapters to existing merkle-ized data stores, or modifying the stores to support this interface.
We need to consider the API (both in-process and over the network), language bindings, maintaining handles to old state (and garbage collecting), persistence, security, providing merkle proofs, and general key-value store operations. To stay consistent with the blockchain's "single global order of operations", this data store should only allow one connection at a time to have write access.
## Overview
* **State**
* There are two concepts of state, "committed state" and "working state"
* The working state is only accessible from the ABCi app, allows writing, but does not need to support proofs.
* When we commit the "working state", it becomes a new "committed state" and has an immutable root hash, provides proofs, and can be exposed to external clients.
* **Transactions**
* The database always allows creating a read-only transaction at the last "committed state", this transaction can serve read queries and proofs.
* The database maintains all data to serve these read transactions until they are closed by the client (or time out). This allows the client(s) to determine how much old info is needed
* The database can support *at most* one writable transaction at a time. This makes it easy to enforce serializability, and attempting to start a second writable transaction may trigger a panic.
* **Functionality**
* It must support efficient key-value operations (get/set/delete)
* It must support returning merkle proofs for any "committed state"
* It should support range queries on subsets of the key space if possible (ie. if the db doesn't hash keys)
* It should also support listening to changes to a desired key via pub-sub or similar method, so I can quickly notify you on a change to your balance without constant polling.
* It may support other db-specific query types as an extension to this interface, as long as all specified actions maintain their meaning.
* **Interface**
* This interface should be domain-specific - ie. designed just for this use case
* It should present a simple go interface for embedding the data store in-process
* It should create a gRPC/protobuf API for calling from any client
* It should provide and maintain client adapters from our in-process interface to gRPC client calls for at least golang and Java (maybe more languages?)
* It should provide and maintain server adapters from our gRPC calls to the in-process interface for golang at least (unless there is another server we wish to support)
* **Persistence**
* It must support atomic persistence upon committing a new block. That is, upon crash recovery, the state is guaranteed to represent the state at the end of a complete block (along with a note of which height it was).
* It must delay deletion of old data as long as there are open read-only transactions referring to it, thus we must maintain some sort of WAL to keep track of pending cleanup.
* When a transaction is closed, or when we recover from a crash, it should clean up all no longer needed data to avoid memory/storage leaks.
* **Security and Auth**
* If we allow connections over gRPC, we must consider these issues and allow both encryption (SSL), and some basic auth rules to prevent undesired access to the DB
* This is client-specific and does not need to be supported in the in-process, embedded version.
## Details
Here we go more in-depth in each of the sections, explaining the reasoning and more details on the desired behavior. This document is only the high-level architecture and should support multiple implementations. When building out a specific implementation, a similar document should be provided for that repo, showing how it implements these concepts, and details about memory usage, storage, efficiency, etc.
### State
The current ABCi interface avoids this question a bit and that has brought confusion. If I use `merkleeyes` to store data, which state is returned from `Query`? The current "working" state, which I would like to refer to in my ABCi application? Or the last committed state, which I would like to return to a client's query? Or an old state, which I may select based on height?
Right now, `merkleeyes` implements `Query` like a normal ABCi app and only returns committed state, which has led to problems and confusion. Thus, we need to be explicit about which state we want to view. Each viewer can then specify which state it wants to view. This allows the app to query the working state in DeliverTx, but the committed state in Query.
We can easily provide two global references for "last committed" and "current working" states. However, if we want to also allow querying of older commits... then we need some way to keep track of which ones are still in use, so we can garbage collect the unneeded ones. There is a non-trivial overhead in holding references to all past states, but also a hard-coded solution (hold onto the last 5 commits) may not support all clients. We should let the client define this somehow.
### Transactions
Transactions (in the typical database sense) are a clean and established solution to this issue. We can look at the [isolation levels](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable) which attempt to provide us things like "repeatable reads". That means if we open a transaction, and query some data 100 times while other processes are writing to the db, we get the same result each time. This transaction has a reference to its own local state from the time the transaction started. (We are referring to the highest isolation levels here, which correlate well with the blockchain use case.)
If we implement a read-only transaction as a reference to state at the time of creation of that transaction, we can then hold these references to various snapshots, one per block that we are interested, and allow the client to multiplex queries and proofs from these various blocks.
If we continue using these concepts (which have informed 30+ years of server side design), we can add a few nice features to our write transactions. The first of which is `Rollback` and `Commit`. That means all the changes we make in this transaction have no effect on the database until they are committed. And until they are committed, we can always abort if we detect an anomaly, returning to the last committed state with a rollback.
There is also a nice extension to this available on some database servers, basically, "nested" transactions or "savepoints". This means that within one transaction, you can open a subtransaction/savepoint and continue work. Later you have the option to commit or rollback all work since the savepoint/subtransaction. And then continue with the main transaction.
If you don't understand why this is useful, look at how basecoin needs to [hold cached state for AppTx](https://github.com/tendermint/basecoin/blob/master/state/execution.go#L126-L149), meaning that it rolls back all modifications if the AppTx returns an error. This was implemented as a wrapper in basecoin, but it is a reasonable thing to support in the DB interface itself (especially since the implementation becomes quite non-trivial as soon as you support range queries).
To give a bit more reference to this concept in practice, read about [Savepoints in Postgresql](https://www.postgresql.org/docs/current/static/tutorial-transactions.html) ([reference](https://www.postgresql.org/docs/current/static/sql-savepoint.html)) or [Nesting transactions in SQL Server](http://dba-presents.com/index.php/databases/sql-server/43-nesting-transactions-and-save-transaction-command) (TL;DR: scroll to the bottom, section "Real nesting transactions with SAVE TRANSACTION")
### Functionality
Merkle trees work with key-value pairs, so we should most importantly focus on the basic Key-Value operations. That is `Get`, `Set`, and `Remove`. We also need to return a merkle proof for any key, along with a root hash of the tree for committing state to the blockchain. This is just the basic merkle-tree stuff.
If it is possible with the implementation, it is nice to provide access to Range Queries. That is, return all values where the key is between X and Y. If you construct your keys wisely, it is possible to store list (1:N) relations this way. E.g., if blog posts are stored under the key blog:`poster_id`:`sequence`, then I could search for all blog posts by a given `poster_id`, or even return just posts 10-19 from the given poster.
The construction of a tree that supports range queries was one of the [design decisions of go-merkle](https://github.com/tendermint/go-merkle/blob/master/README.md). It is also kind of possible with [ethereum's patricia trie](https://github.com/ethereum/wiki/wiki/Patricia-Tree) as long as the key is less than 32 bytes.
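
As a toy illustration of the key scheme above, using the `DBReader` interface sketched below (and assuming zero-padded sequence numbers so that lexicographic order matches numeric order):

```
// listPosts returns posts `from` through `to` (inclusive) by one poster,
// relying on zero-padded sequence numbers for lexicographic ordering.
func listPosts(reader DBReader, poster string, from, to int) ([]Model, error) {
	start := []byte(fmt.Sprintf("blog:%s:%04d", poster, from))
	end := []byte(fmt.Sprintf("blog:%s:%04d", poster, to))
	return reader.GetRange(start, end, true, to-from+1)
}
```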
In addition to range queries, there is one more nice feature that we could add to our data store - listening to events. Depending on your context, this is "reactive programming", "event emitters", "notifications", etc... But the basic concept is that a client can listen for all changes to a given key (or set of keys), and receive a notification when this happens. This is very important to avoid [repeated polling and wasted queries](http://resthooks.org/) when a client simply wants to [detect changes](https://www.rethinkdb.com/blog/realtime-web/).
If the database provides access to some "listener" functionality, the app can choose to expose this to the external client via websockets, web hooks, http2 push events, android push notifications, etc, etc etc.... But if we want to support modern client functionality, let's add support for this reactive paradigm in our DB interface.
**TODO** support for more advanced backends, eg. Bolt....
### Go Interface
I will start with a simple go interface to illustrate the in-process interface. Once there is agreement on how this looks, we can work out the gRPC bindings to support calling out of process. These interfaces are not finalized code, but I think they demonstrate the concepts better than text and provide a strawman to get feedback.
```
// DB represents the committed state of a merkle-ized key-value store
type DB interface {
// Snapshot returns a reference to last committed state to use for
// providing proofs, you must close it at the end to garbage collect
// the historical state we hold on to to make these proofs
Snapshot() Prover
// Start a transaction - only way to change state
// This will return an error if there is an open Transaction
Begin() (Transaction, error)
// These callbacks are triggered when the Transaction is Committed
// to the DB. They can be used to eg. notify clients via websockets when
// their account balance changes.
AddListener(key []byte, listener Listener)
RemoveListener(listener Listener)
}
// DBReader represents a read-only connection to a snapshot of the db
type DBReader interface {
// Queries on my local view
Has(key []byte) (bool, error)
Get(key []byte) (Model, error)
GetRange(start, end []byte, ascending bool, limit int) ([]Model, error)
Closer
}
// Prover is an interface that lets one query for Proofs, holding the
// data at a specific location in memory
type Prover interface {
DBReader
// Hash is the AppHash (RootHash) for this block
Hash() (hash []byte)
// Prove returns the data along with a merkle Proof
// Model and Proof are nil if not found
Prove(key []byte) (Model, Proof, error)
}
// Transaction is a set of state changes to the DB to be applied atomically.
// There can only be one open transaction at a time, which may only have
// maximum one subtransaction at a time.
// In short, at any time, there is exactly one object that can write to the
// DB, and we can use Subtransactions to group operations and roll them back
// together (kind of like `types.KVCache` from basecoin)
type Transaction interface {
DBReader
// Change the state - will raise error immediately if this Transaction
// is not holding the exclusive write lock
Set(model Model) (err error)
Remove(key []byte) (removed bool, err error)
// Subtransaction starts a new subtransaction, rollback will not affect the
// parent. Only on Commit are the changes applied to this transaction.
// While the subtransaction exists, no write allowed on the parent.
// (You must Commit or Rollback the child to continue)
Subtransaction() Transaction
// Commit this transaction (or subtransaction), the parent reference is
// now updated.
// This only updates the persistent store if the top-level transaction commits
// (You may have any number of nested sub transactions)
Commit() error
// Rollback ends the transaction and throws away all transaction-local state,
// allowing the tree to prune those elements.
// The parent transaction now recovers the write lock.
Rollback()
}
// Listener registers callbacks on changes to the data store
type Listener interface {
OnSet(key, value, oldValue []byte)
OnRemove(key, oldValue []byte)
}
// Proof represents a merkle proof for a key
type Proof interface {
RootHash() []byte
Verify(key, value, root []byte) bool
}
type Model interface {
Key() []byte
Value() []byte
}
// Closer releases the reference to this state, allowing us to garbage collect
// Make sure to call it before discarding.
type Closer interface {
Close()
}
```
### Remote Interface
The use case of allowing out-of-process calls is very powerful, and not just for providing a powerful merkle-ready data store to non-go applications.
If we allow the ABCi app to maintain the only writable connections, we can guarantee that all transactions are only processed through the tendermint consensus engine. We could then allow multiple "web server" machines "read-only" access and scale out the database reads, assuming the consensus engine, ABCi logic, and public key cryptography are more of a bottleneck than the database. We could even place the consensus engine, ABCi app, and data store on one machine, connected with unix sockets for security, and expose a tcp/ssl interface for reading the data, to scale out query processing over multiple machines.
Returning our focus directly to the ABCi app (which is the most important use case): an app may well want to maintain 100 or 1000 snapshots of different heights to allow people to easily query many proofs at a given height without race conditions (very important for IBC, ask Jae). Thus, we should not require a separate TCP connection for each height, as this gets quite awkward with so many connections. Also, if we want to use gRPC, we should consider the connections potentially transient (although they are more efficient with keep-alive).
Thus, the wire encoding of a transaction or a snapshot should simply return a unique id. All methods on a `Prover` or `Transaction` over the wire can send this id along with the arguments for the method call. And we just need a hash map on the server to map this id to a state.
The only negative of not requiring a persistent tcp connection for each snapshot is there is no auto-detection if the client crashes without explicitly closing the connections. Thus, I would suggest adding a `Ping` thread in the gRPC interface which keeps the Snapshot alive. If no ping is received within a server-defined time, it may automatically close those transactions. And if we consider a client with 500 snapshots that needs to ping each every 10 seconds, that is a lot of overhead, so we should design the ping to accept a list of IDs for the client and update them all. Or associate all snapshots with a clientID and then just send the clientID in the ping. (Please add other ideas on how to detect client crashes without persistent connections).
To encourage adoption, we should provide a nice client that uses this gRPC interface (like we do with ABCi). For go, the client may have the exact same interface as the in-process version, just that the error call may return network errors, not just illegal operations. We should also add a client with a clean API for Java, since that seems to be popular among app developers in the current tendermint community. Other bindings as we see the need in the server space.
### Persistence
Any data store worth its name should not lose all data on a crash. Even [redis provides some persistence](https://redis.io/topics/persistence) these days. Ideally, if the system crashes and restarts, it should have the data at the last block N that was committed. If the system crashes during the commit of block N+1, then the recovered state should either be block N or completely committed block N+1, but no partial state between the two. Basically, the commit must be an atomic operation (even if updating 100's of records).
To avoid a lot of headaches ourselves, we can use an existing data store, such as leveldb, which provides `WriteBatch` to group all operations.
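
For instance, with the goleveldb implementation mentioned below, a block commit could be staged as one batch, roughly like this (illustrative sketch, not the proposed API):

```
import "github.com/syndtr/goleveldb/leveldb"

// commitBlock persists all key-value changes from one block atomically:
// either every write in the batch lands, or none do.
func commitBlock(db *leveldb.DB, updates map[string][]byte) error {
	batch := new(leveldb.Batch)
	for k, v := range updates {
		batch.Put([]byte(k), v)
	}
	return db.Write(batch, nil)
}
```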
The other issue is cleaning up old state. We cannot delete any information from our persistent store, as long as any snapshot holds a reference to it (or else we get some panics when the data we query is not there). So, we need to store the outstanding deletions that we can perform when the snapshot is `Close`d. In addition, we must consider the case that the data store crashes with open snapshots. Thus, the info on outstanding deletions must also be persisted somewhere. Something like a "delete-behind log" (the opposite of a "write ahead log").
This is not a concern of the generic interface, but each implementation should take care to handle this well to avoid accumulation of unused references in the data store and eventual data bloat.
#### Backing stores
It is way outside the scope of this project to build our own database that is capable of efficiently storing the data, providing multiple read-only snapshots at once, and saving it atomically. The best approach seems to be to select an existing database (ideally a simple one) that provides this functionality and build upon it, much like the current `go-merkle` implementation builds upon `leveldb`. After some research, here are the winners and losers:
**Winners**
* Leveldb - [provides consistent snapshots](https://ayende.com/blog/161705/reviewing-leveldb-part-xiii-smile-and-here-is-your-snapshot), and [provides tooling for building ACID compliance](http://codeofrob.com/entries/writing-a-transaction-manager-on-top-of-leveldb.html)
* Note there are at least two solid implementations available in go - [goleveldb](https://github.com/syndtr/goleveldb) - a pure go implementation, and [levigo](https://github.com/jmhodges/levigo) - a go wrapper around leveldb.
* Goleveldb is much easier to compile and cross-compile (not requiring cgo), while levigo (or cleveldb) seems to provide a significant performance boost (but I had trouble even running benchmarks)
* PostgreSQL - fully supports these ACID semantics if you call `SET TRANSACTION ISOLATION LEVEL SERIALIZABLE` at the beginning of a transaction (tested)
* This may be total overkill unless we also want to make use of other features, like storing data in multiple columns with secondary indexes.
* Trillian can show an example of [how to store a merkle tree in sql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go)
**Losers**
* Bolt - open [read-only snapshots can block writing](https://github.com/boltdb/bolt/issues/378)
* Mongo - [barely even supports atomic operations](https://docs.mongodb.com/manual/core/write-operations-atomicity/), much less multiple snapshots
**To investigate**
* [Trillian](https://github.com/google/trillian) - has a [persistent merkle tree interface](https://github.com/google/trillian/blob/master/storage/tree_storage.go) along with [backend storage with mysql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go), good inspiration for our design if not directly using it
* [Moss](https://github.com/couchbase/moss) - another key-value store in go, seems similar to leveldb, maybe compare with performance tests?
### Security
When allowing access out-of-process, we should provide different mechanisms to secure it. The first is the choice of binding to a local unix socket or a tcp port. The second is the optional use of ssl to encrypt the connection (very important over tcp). The third is authentication to control access to the database.
We may also want to consider the case of two server connections with different permissions, eg. a local unix socket that allows write access with no more credentials, and a public TCP connection with ssl and authentication that only provides read-only access.
The use of ssl is quite easy in go: we just need to generate and sign a certificate. So it is nice to be able to disable it for dev machines, but it is very important for production.
For authentication, let me sketch out a minimal solution. The server could just have a simple config file with key/bcrypt(password) pairs along with read/write permission level, and read that upon startup. The client must provide a username and password in the HTTP headers when making the original HTTPS gRPC connection.
This is super minimal to provide some protection. Things like LDAP, OAuth and single-sign on seem overkill and even potential security holes. Maybe there is another solution somewhere in the middle.

+ 0
- 17
docs/architecture/merkle.md View File

@ -1,17 +0,0 @@
# Merkle data stores
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an app hash to the consensus engine, as well as a full proof to any client.
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCi. It uses `tmlibs/db` bindings internally to persist data to leveldb.
What are some of the requirements of this store:
* It must support efficient key-value operations (get/set/delete)
* It must support persistence.
* We must only persist complete blocks, so when we come up after a crash we are at the state of block N or N+1, but not in-between these two states.
* It must allow us to read/write from one uncommitted state (working state), while serving other queries from the last committed state, along with a way to determine which one to serve for each client.
* It must allow us to hold references to old state, to allow providing proofs from 20 blocks ago. We can define some limits as to the maximum time to hold this data.
* We provide in-process bindings in Go
* We provide language-agnostic bindings when running the data store as its own process.

docs/images/abci.png → docs/assets/abci.png View File


docs/images/consensus_logic.png → docs/assets/consensus_logic.png View File


docs/images/tm-transaction-flow.png → docs/assets/tm-transaction-flow.png View File


+ 29
- 1
docs/conf.py View File

@ -16,9 +16,10 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import urllib
import sphinx_rtd_theme
@ -169,3 +170,30 @@ texinfo_documents = [
author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.',
'Database'),
]
repo = "https://raw.githubusercontent.com/tendermint/tools/"
branch = "master"
tools = "./tools"
assets = tools + "/assets"
if not os.path.isdir(tools):
os.mkdir(tools)
if not os.path.isdir(assets):
os.mkdir(assets)
urllib.urlretrieve(repo+branch+'/ansible/README.rst', filename=tools+'/ansible.rst')
urllib.urlretrieve(repo+branch+'/ansible/assets/a_plus_t.png', filename=assets+'/a_plus_t.png')
urllib.urlretrieve(repo+branch+'/docker/README.rst', filename=tools+'/docker.rst')
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/README.rst', filename=tools+'/mintnet-kubernetes.rst')
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets+'/gce1.png')
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets+'/gce2.png')
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets+'/statefulset.png')
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets+'/t_plus_k.png')
urllib.urlretrieve(repo+branch+'/terraform-digitalocean/README.rst', filename=tools+'/terraform-digitalocean.rst')
urllib.urlretrieve(repo+branch+'/tm-bench/README.rst', filename=tools+'/benchmarking-and-monitoring.rst')
# the readme for below is included in tm-bench
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')

+ 47
- 4
docs/deploy-testnets.rst View File

@ -40,8 +40,51 @@ Automated Deployments
---------------------
While the manual deployment is easy enough, an automated deployment is
always better. For this, we have the `mintnet-kubernetes
tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__,
which allows us to automate the deployment of a Tendermint network on an
already provisioned kubernetes cluster. And for simple provisioning of kubernetes
usually quicker. The below examples show different tools that can be used
for automated deployments.
Automated Deployment using Kubernetes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
allows automating the deployment of a Tendermint network on an already
provisioned kubernetes cluster. For simple provisioning of a kubernetes
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
Automated Deployment using Terraform and Ansible
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
allows creating a set of servers on the DigitalOcean cloud.
The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
Package Deployment on Linux for developers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
Linux machines for development purposes. The packages are configured to be validators on the
one-node network that the machine represents. The services are not started after installation,
giving you an opportunity to reconfigure the applications before starting them.
The Ansible playbooks in the previous section use this repository to install ``basecoin``.
After installation, additional steps are executed to make sure that the multi-node testnet has
the right configuration before starting.
Install from the CentOS/RedHat repository:
::
rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
yum install basecoin
Install from the Debian/Ubuntu repository:
::
wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
apt-get update && apt-get install basecoin

+ 94
- 0
docs/ecosystem.rst View File

@ -0,0 +1,94 @@
Tendermint Ecosystem
====================
Below are the many applications built using various pieces of the Tendermint stack. We thank the community for their contributions thus far and welcome the addition of new projects. Feel free to submit a pull request to add your project!
ABCI Applications
-----------------
Burrow
^^^^^^
Ethereum Virtual Machine augmented with a native permissioning scheme and a global key-value store, written in Go, authored by Monax Industries, and incubated `by Hyperledger <https://github.com/hyperledger/burrow>`__.
cb-ledger
^^^^^^^^^
Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow, written in C++, and `authored by Block Finance <https://github.com/block-finance/cpp-abci>`__.
Clearchain
^^^^^^^^^^
Application to manage a distributed ledger for money transfers that supports multi-currency accounts, written in Go, and `authored by Alessio Treglia <https://github.com/tendermint/clearchain>`__.
Comit
^^^^^
Public service reporting and tracking, written in Go, and `authored by Zach Balder <https://github.com/zbo14/comit>`__.
Cosmos SDK
^^^^^^^^^^
A prototypical account-based cryptocurrency state machine supporting plugins, written in Go, and `authored by Cosmos <https://github.com/cosmos/cosmos-sdk>`__.
Ethermint
^^^^^^^^^
The go-ethereum state machine run as an ABCI app, written in Go, `authored by Tendermint <https://github.com/tendermint/ethermint>`__.
Merkle AVL Tree
^^^^^^^^^^^^^^^
The following are implementations of the Tendermint IAVL tree as an ABCI application.
Merkleeyes
~~~~~~~~~~
Written in Go, `authored by Tendermint <https://github.com/tendermint/merkleeyes>`__.
MerkleTree
~~~~~~~~~~
Written in Java, `authored by jTendermint <https://github.com/jTendermint/MerkleTree>`__.
TMChat
^^^^^^
P2P chat using Tendermint, written in Java, `authored by wolfposd <https://github.com/wolfposd/TMChat>`__.
Passwerk
^^^^^^^^
Encrypted storage web-utility backed by Tendermint, written in Go, `authored by Rigel Rozanski <https://github.com/rigelrozanski/passwerk>`__.
ABCI Servers
------------
+-------------------------------------------------------------+--------------------+--------------+
| **Name** | **Author** | **Language** |
| | | |
+-------------------------------------------------------------+--------------------+--------------+
| `abci <https://github.com/tendermint/abci>`__ | Tendermint | Go |
+-------------------------------------------------------------+--------------------+--------------+
| `js abci <https://github.com/tendermint/js-abci>`__ | Tendermint | Javascript |
+-------------------------------------------------------------+--------------------+--------------+
| `cpp-tmsp <https://github.com/mdyring/cpp-tmsp>`__ | Martin Dyring | C++ |
+-------------------------------------------------------------+--------------------+--------------+
| `jabci <https://github.com/jTendermint/jabci>`__ | jTendermint | Java |
+-------------------------------------------------------------+--------------------+--------------+
| `Spearmint <https://github.com/dennismckinnon/spearmint>`__ | Dennis Mckinnon | Javascript |
+-------------------------------------------------------------+--------------------+--------------+
| `ocaml-tmsp <https://github.com/zbo14/ocaml-tmsp>`__ | Zach Balder | Ocaml |
+-------------------------------------------------------------+--------------------+--------------+
| `abci_server <https://github.com/KrzysiekJ/abci_server>`__ | Krzysztof Jurewicz | Erlang |
+-------------------------------------------------------------+--------------------+--------------+
Deployment Tools
----------------
See `deploy testnets <./deploy-testnets.html>`__ for information about all the tools built by Tendermint. We have Kubernetes, Ansible, and Terraform integrations.
Cloudsoft built `brooklyn-tendermint <https://github.com/cloudsoft/brooklyn-tendermint>`__ for deploying a Tendermint testnet in Docker containers. It uses Clocker for Apache Brooklyn.

+ 22
- 1
docs/index.rst View File

@ -23,9 +23,30 @@ Tendermint 101
introduction.rst
install.rst
getting-started.rst
deploy-testnets.rst
using-tendermint.rst
Tendermint Tools
----------------
.. toctree::
:maxdepth: 2
deploy-testnets.rst
tools/ansible.rst
tools/docker.rst
tools/mintnet-kubernetes.rst
tools/terraform-digitalocean.rst
tools/benchmarking-and-monitoring.rst
Tendermint Ecosystem
--------------------
.. toctree::
:maxdepth: 2
ecosystem.rst
Tendermint 102
--------------


+ 20
- 12
docs/install.rst View File

@ -1,17 +1,24 @@
Install from Source
===================
Install Tendermint
==================
From Binary
-----------
To download pre-built binaries, see the `Download page <https://tendermint.com/download>`__.
This page provides instructions on installing Tendermint from source. To
download pre-built binaries, see the `Download page <https://tendermint.com/download>`__.
From Source
-----------
You'll need `go`, maybe `glide` and the tendermint source code.
Install Go
----------
^^^^^^^^^^
Make sure you have `installed Go <https://golang.org/doc/install>`__ and
set the ``GOPATH``.
Install Tendermint
------------------
Get Source Code
^^^^^^^^^^^^^^^
You should be able to install the latest with a simple
@ -19,13 +26,14 @@ You should be able to install the latest with a simple
go get github.com/tendermint/tendermint/cmd/tendermint
Run ``tendermint --help`` for more.
Run ``tendermint --help`` and ``tendermint version`` to ensure your
installation worked.
If the installation failed, a dependency may have been updated and become
incompatible with the latest Tendermint master branch. We solve this
using the ``glide`` tool for dependency management.
Fist, install ``glide``:
First, install ``glide``:
::
@ -45,7 +53,7 @@ still cloned to the correct location in the ``$GOPATH``.
The latest Tendermint Core version is now installed.
Reinstall
~~~~~~~~~
---------
If you already have Tendermint installed, and you make updates, simply
@ -79,7 +87,7 @@ Since the third option just uses ``glide`` right away, it should always
work.
Troubleshooting
~~~~~~~~~~~~~~~
---------------
If ``go get`` failing bothers you, fetch the code using ``git``:
@ -92,7 +100,7 @@ If ``go get`` failing bothers you, fetch the code using ``git``:
go install ./cmd/tendermint
Run
~~~
^^^
To start a one-node blockchain with a simple in-process application:


+ 6
- 6
docs/introduction.rst View File

@ -86,10 +86,10 @@ And we plan to do the same for Bitcoin, ZCash, and various other deterministic a
Another example of a cryptocurrency application built on Tendermint is `the Cosmos network <http://cosmos.network>`__.
Fabric, Burrow
~~~~~~~~~~~~~~
Other Blockchain Projects
~~~~~~~~~~~~~~~~~~~~~~~~~
`Fabric <https://github.com/hyperledger/fabric>`__, takes a similar approach to Tendermint, but is more opinionated about how the state is managed,
`Fabric <https://github.com/hyperledger/fabric>`__ takes a similar approach to Tendermint, but is more opinionated about how the state is managed,
and requires that all application behaviour runs in potentially many docker containers, modules it calls "chaincode".
It uses an implementation of `PBFT <http://pmg.csail.mit.edu/papers/osdi99.pdf>`__
from a team at IBM that is
@ -156,7 +156,7 @@ There can be multiple ABCI socket connections to an application. Tendermint Core
It's probably evident that application designers need to design their message handlers very carefully to create a blockchain that does anything useful, but this architecture provides a place to start. The diagram below illustrates the flow of messages via ABCI.
.. figure:: images/abci.png
.. figure:: assets/abci.png
A Note on Determinism
~~~~~~~~~~~~~~~~~~~~~
@ -180,7 +180,7 @@ Consensus Overview
Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus protocol.
The protocol follows a simple state machine that looks like this:
.. figure:: images/consensus_logic.png
.. figure:: assets/consensus_logic.png
Participants in the protocol are called "validators";
they take turns proposing blocks of transactions and voting on them.
@ -228,4 +228,4 @@ The `Cosmos Network <http://cosmos.network>`__ is designed to use this Proof-of-
The following diagram is Tendermint in a (technical) nutshell. `See here for high resolution version <https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf>`__.
.. figure:: images/tm-transaction-flow.png
.. figure:: assets/tm-transaction-flow.png

+ 11
- 11
docs/specification.rst View File

@ -7,14 +7,14 @@ Here you'll find details of the Tendermint specification. See `the spec repo <ht
.. toctree::
:maxdepth: 2
block-structure.rst
byzantine-consensus-algorithm.rst
configuration.rst
fast-sync.rst
genesis.rst
light-client-protocol.rst
merkle.rst
rpc.rst
secure-p2p.rst
validators.rst
wire-protocol.rst
specification/block-structure.rst
specification/byzantine-consensus-algorithm.rst
specification/configuration.rst
specification/fast-sync.rst
specification/genesis.rst
specification/light-client-protocol.rst
specification/merkle.rst
specification/rpc.rst
specification/secure-p2p.rst
specification/validators.rst
specification/wire-protocol.rst

docs/block-structure.rst → docs/specification/block-structure.rst View File


docs/byzantine-consensus-algorithm.rst → docs/specification/byzantine-consensus-algorithm.rst View File


docs/configuration.rst → docs/specification/configuration.rst View File


docs/fast-sync.rst → docs/specification/fast-sync.rst View File


docs/genesis.rst → docs/specification/genesis.rst View File


docs/light-client-protocol.rst → docs/specification/light-client-protocol.rst View File


docs/merkle.rst → docs/specification/merkle.rst View File


docs/rpc.rst → docs/specification/rpc.rst View File


docs/secure-p2p.rst → docs/specification/secure-p2p.rst View File


docs/validators.rst → docs/specification/validators.rst View File


docs/wire-protocol.rst → docs/specification/wire-protocol.rst View File


+ 4
- 4
docs/using-tendermint.rst View File

@ -117,7 +117,7 @@ Configuration
-------------
Tendermint uses a ``config.toml`` for configuration. For details, see
`the documentation <./configuration.html>`__.
`the documentation <./specification/configuration.html>`__.
Notable options include the socket address of the application
(``proxy_app``), the listening address of the tendermint peer
@ -177,7 +177,7 @@ When ``tendermint init`` is run, both a ``genesis.json`` and
"genesis_time": "0001-01-01T00:00:00.000Z",
"validators": [
{
"amount": 10,
"power": 10,
"name": "",
"pub_key": [
1,
@ -310,7 +310,7 @@ then the new ``genesis.json`` will be:
"genesis_time": "0001-01-01T00:00:00.000Z",
"validators": [
{
"amount": 10,
"power": 10,
"name": "",
"pub_key": [
1,
@ -318,7 +318,7 @@ then the new ``genesis.json`` will be:
]
},
{
"amount": 10,
"power": 10,
"name": "",
"pub_key": [
1,


+ 47
- 39
glide.lock View File

@ -1,41 +1,48 @@
hash: 41581813ff97225a7feb86b5accb0fe4acb3e198b64592d7452240e9473c479f
updated: 2017-08-03T19:17:16.410522485Z
hash: e3649cac7b1b9a23c024a9d1bbebd5a147861d55da2bca77c95129b6021850b4
updated: 2017-09-22T13:24:29.443800586-04:00
imports:
- name: github.com/btcsuite/btcd
version: b8df516b4b267acf2de46be593a9d948d1d2c420
version: 4803a8291c92a1d2d41041b942a9a9e37deab065
subpackages:
- btcec
- name: github.com/btcsuite/fastsha256
version: 637e656429416087660c84436a2a035d69d54e2e
- name: github.com/ebuchman/fail-test
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/go-kit/kit
version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8
version: 0d313fb5fb3a94d87d61e6434785264e87a5d740
subpackages:
- log
- log/level
- log/term
- name: github.com/go-logfmt/logfmt
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-playground/locales
version: 1e5f1161c6416a5ff48840eb8724a394e48cc534
subpackages:
- currency
- name: github.com/go-playground/universal-translator
version: 71201497bace774495daed26a3874fd339e0b538
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf
- name: github.com/gogo/protobuf
version: 9df9efe4c742f1a2bfdedf1c3b6902fc6e814c6b
version: 2adc21fd136931e0388e278825291678e1d98309
subpackages:
- proto
- name: github.com/golang/protobuf
version: 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
version: 130e6b02ab059e7b717a096f397c5b60111cae74
subpackages:
- proto
- ptypes
- ptypes/any
- ptypes/duration
- ptypes/timestamp
- name: github.com/golang/snappy
version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/gorilla/websocket
version: a91eba7f97777409bc2c443f5534d41dd20c5720
version: 6f34763140ed8887aed6a044912009832b4733d7
- name: github.com/hashicorp/hcl
version: 392dba7d905ed5d04a5794ba89f558b27e2ba1ca
version: 68e816d1c783414e79bc65b3994d9ab6b0a722ab
subpackages:
- hcl/ast
- hcl/parser
@ -52,33 +59,31 @@ imports:
- name: github.com/kr/logfmt
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
- name: github.com/magiconair/properties
version: 51463bfca2576e06c62a8504b5c0f06d61312647
version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a
- name: github.com/mitchellh/mapstructure
version: cc8532a8e9a55ea36402aa21efdf403a60d34096
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
version: d0303fe809921458f417bcf828397a65db30a7e4
- name: github.com/pelletier/go-toml
version: 5ccdfb18c776b740aecaf085c4d9a2779199c279
version: 1d6b12b7cb290426e27e6b4e38b89fcda3aeef03
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/rcrowley/go-metrics
version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c
- name: github.com/spf13/afero
version: 9be650865eab0c12963d8753212f4f9c66cdcf12
version: ee1bd8ee15a1306d1f9201acc41ef39cd9f99a1b
subpackages:
- mem
- name: github.com/spf13/cast
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
- name: github.com/spf13/cobra
version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77
version: b78744579491c1ceeaaa3b40205e56b0591b93a3
- name: github.com/spf13/jwalterweatherman
version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99
version: 12bd96e66386c1960ab0f74ced1362f66f552f7b
- name: github.com/spf13/pflag
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
version: 7aff26db30c1be810f9de5038ec5ef96ac41fd7c
- name: github.com/spf13/viper
version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2
version: 25b30aa063fc18e48662b86996252eabdcf2f0c7
- name: github.com/syndtr/goleveldb
version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4
version: b89cc31ef7977104127d34c1bd31ebd1a9db2199
subpackages:
- leveldb
- leveldb/cache
@ -93,7 +98,7 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 864d1f80b36b440bde030a5c18d8ac3aa8c2949d
version: 191c4b6d176169ffc7f9972d490fa362a3b7d940
subpackages:
- client
- example/counter
@ -106,21 +111,18 @@ imports:
- edwards25519
- extra25519
- name: github.com/tendermint/go-crypto
version: 95b7c9e09c49b91bfbb71bb63dd514eb55450f16
version: 311e8c1bf00fa5868daad4f8ea56dcad539182c0
- name: github.com/tendermint/go-wire
version: 5f88da3dbc1a72844e6dfaf274ce87f851d488eb
subpackages:
- data
- data/base58
- name: github.com/tendermint/merkleeyes
version: 102aaf5a8ffda1846413fb22805a94def2045b9f
version: 2a93256d2c6fbcc3b55673c0d2b96a7e32c6238b
subpackages:
- app
- client
- iavl
- testutil
- name: github.com/tendermint/tmlibs
version: 7ce4da1eee6004d627e780c8fe91e96d9b99e459
version: 9997e3a3b46db1d2f88aa9816ed0e7915dad6ac1
subpackages:
- autofile
- cli
@ -128,12 +130,13 @@ imports:
- clist
- common
- db
- events
- flowrate
- log
- merkle
- test
- name: golang.org/x/crypto
version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e
version: 7d9177d70076375b9a59c8fde23d52d9c4a7ecd5
subpackages:
- curve25519
- nacl/box
@ -144,7 +147,7 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: feeb485667d1fdabe727840fe00adc22431bc86e
version: 0744d001aa8470aaa53df28d32e5ceeb8af9bd70
subpackages:
- context
- http2
@ -154,41 +157,46 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: e62c3de784db939836898e5c19ffd41bece347da
version: 429f518978ab01db8bb6f44b66785088e7fba58b
subpackages:
- unix
- name: golang.org/x/text
version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4
version: 1cbadb444a806fd9430d14ad08967ed91da4fa0a
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
version: 1e559d0a00eef8a9a43151db4665280bd8dd5886
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: 844f573616520565fdc6fb4db242321b5456fd6d
version: d4b75ebd4f9f8c4a2b1cdadbdbe0d7920431ccca
subpackages:
- balancer
- codes
- connectivity
- credentials
- grpclb/grpc_lb_v1
- grpclb/grpc_lb_v1/messages
- grpclog
- internal
- keepalive
- metadata
- naming
- peer
- resolver
- stats
- status
- tap
- transport
- name: gopkg.in/go-playground/validator.v9
version: a021b2ec9a8a8bb970f3f15bc42617cb520e8a64
- name: gopkg.in/yaml.v2
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
version: eb3733d160e74a9c7e442f435eb3bea458e1d19f
testImports:
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
subpackages:
- spew
- name: github.com/pmezard/go-difflib
@ -196,7 +204,7 @@ testImports:
subpackages:
- difflib
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
version: 890a5c3458b43e6104ff5da8dfa139d013d77544
subpackages:
- assert
- require

+ 7
- 9
glide.yaml View File

@ -14,19 +14,23 @@ import:
- package: github.com/spf13/cobra
- package: github.com/spf13/viper
- package: github.com/tendermint/abci
version: v0.5.0
version: ~0.6.0
subpackages:
- client
- example/dummy
- types
- package: github.com/tendermint/go-crypto
version: ~0.2.2
version: ~0.3.0
- package: github.com/tendermint/go-wire
version: ~0.6.2
subpackages:
- data
- package: github.com/tendermint/merkleeyes
version: master
subpackages:
- iavl
- package: github.com/tendermint/tmlibs
version: ~0.2.2
version: ~0.3.1
subpackages:
- autofile
- cli
@ -46,12 +50,6 @@ import:
subpackages:
- context
- package: google.golang.org/grpc
- package: github.com/tendermint/merkleeyes
version: ~0.2.4
subpackages:
- app
- iavl
- testutil
testImport:
- package: github.com/go-kit/kit
subpackages:


+ 3
- 3
mempool/reactor.go View File

@ -60,18 +60,18 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *MempoolReactor) AddPeer(peer *p2p.Peer) {
func (memR *MempoolReactor) AddPeer(peer p2p.Peer) {
go memR.broadcastTxRoutine(peer)
}
// RemovePeer implements Reactor.
func (memR *MempoolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// broadcast routine checks if peer is gone and returns
}
// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding message", "err", err)


+ 116
- 41
node/node.go View File

@ -3,6 +3,7 @@ package node
import (
"bytes"
"errors"
"fmt"
"net"
"net/http"
"strings"
@ -10,11 +11,15 @@ import (
abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool"
p2p "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
@ -26,20 +31,66 @@ import (
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
_ "net/http/pprof"
)
//------------------------------------------------------------------------------
// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *cfg.Config
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
return dbm.NewDB(ctx.ID, ctx.Config.DBBackend, ctx.Config.DBDir()), nil
}
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)
// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
return func() (*types.GenesisDoc, error) {
return types.GenesisDocFromFile(config.GenesisFile())
}
}
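As a hedged illustration of the indirection this enables: a caller might decode the genesis doc from raw bytes fetched elsewhere. ``genesisJSON`` is a hypothetical variable, and this assumes ``types.GenesisDocFromJSON``, the same decoder that ``GenesisDocFromFile`` wraps.

```go
var genesisJSON []byte // fetched out of band, e.g. from a KV store

provider := GenesisDocProvider(func() (*types.GenesisDoc, error) {
    return types.GenesisDocFromJSON(genesisJSON)
})
```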
// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
return NewNode(config,
types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()),
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config),
DefaultDBProvider,
logger)
}
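Similarly, a sketch of threading a custom DBProvider through NewNode, here keeping every store in memory for tests; it assumes ``dbm.NewMemDB`` from tmlibs/db.

```go
memDB := func(ctx *DBContext) (dbm.DB, error) {
    return dbm.NewMemDB(), nil // nothing touches disk
}

n, err := NewNode(config,
    types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()),
    proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
    DefaultGenesisDocProviderFunc(config),
    memDB,
    logger)
```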
//------------------------------------------------------------------------------
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
cmn.BaseService
// config
config *cfg.Config
genesisDoc *types.GenesisDoc // initial validator set
privValidator *types.PrivValidator // local node's validator key
genesisDoc *types.GenesisDoc // initial validator set
privValidator types.PrivValidator // local node's validator key
// network
privKey crypto.PrivKeyEd25519 // local node's p2p key
@ -58,24 +109,42 @@ type Node struct {
txIndexer txindex.TxIndexer
}
func NewNodeDefault(config *cfg.Config, logger log.Logger) *Node {
// Get PrivValidator
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
return NewNode(config, privValidator,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), logger)
}
// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
privValidator types.PrivValidator,
clientCreator proxy.ClientCreator,
genesisDocProvider GenesisDocProvider,
dbProvider DBProvider,
logger log.Logger) (*Node, error) {
func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator, logger log.Logger) *Node {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir())
blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
if err != nil {
return nil, err
}
blockStore := bc.NewBlockStore(blockStoreDB)
consensusLogger := logger.With("module", "consensus")
stateLogger := logger.With("module", "state")
// Get State
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
state := sm.GetState(stateDB, config.GenesisFile())
stateDB, err := dbProvider(&DBContext{"state", config})
if err != nil {
return nil, err
}
state := sm.LoadState(stateDB)
if state == nil {
genDoc, err := genesisDocProvider()
if err != nil {
return nil, err
}
state, err = sm.MakeGenesisState(stateDB, genDoc)
if err != nil {
return nil, err
}
state.Save()
}
state.SetLogger(stateLogger)
// Create the proxyApp, which manages connections (consensus, mempool, query)
@ -85,7 +154,7 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
proxyApp.SetLogger(logger.With("module", "proxy"))
if _, err := proxyApp.Start(); err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
}
// reload the state (it may have been updated by the handshake)
@ -96,7 +165,10 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
var txIndexer txindex.TxIndexer
switch config.TxIndex {
case "kv":
store := dbm.NewDB("tx_index", config.DBBackend, config.DBDir())
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
}
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
@ -109,9 +181,8 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
// Make event switch
eventSwitch := types.NewEventSwitch()
eventSwitch.SetLogger(logger.With("module", "types"))
_, err := eventSwitch.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
if _, err := eventSwitch.Start(); err != nil {
return nil, fmt.Errorf("Failed to start switch: %v", err)
}
// Decide whether to fast-sync or not
@ -119,13 +190,13 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
fastSync := config.FastSync
if state.Validators.Size() == 1 {
addr, _ := state.Validators.GetByIndex(0)
if bytes.Equal(privValidator.Address, addr) {
if bytes.Equal(privValidator.GetAddress(), addr) {
fastSync = false
}
}
// Log whether this node is a validator or an observer
if state.Validators.HasAddress(privValidator.Address) {
if state.Validators.HasAddress(privValidator.GetAddress()) {
consensusLogger.Info("This node is a validator")
} else {
consensusLogger.Info("This node is not a validator")
@ -232,12 +303,13 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
txIndexer: txIndexer,
}
node.BaseService = *cmn.NewBaseService(logger, "Node", node)
return node
return node, nil
}
// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.P2P.ListenAddress)
protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress)
l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
n.sw.AddListener(l)
@ -270,6 +342,7 @@ func (n *Node) OnStart() error {
return nil
}
// OnStop stops the Node. It implements cmn.Service.
func (n *Node) OnStop() {
n.BaseService.OnStop()
@ -285,6 +358,7 @@ func (n *Node) OnStop() {
}
}
// RunForever waits for an interrupt signal and stops the node.
func (n *Node) RunForever() {
// Sleep forever and then...
cmn.TrapSignal(func() {
@ -292,15 +366,15 @@ func (n *Node) RunForever() {
})
}
// Add the event switch to reactors, mempool, etc.
// SetEventSwitch adds the event switch to reactors, mempool, etc.
func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
for _, e := range eventables {
e.SetEventSwitch(evsw)
}
}
// Add a Listener to accept inbound peer connections.
// Add listeners before starting the Node.
// AddListener adds a listener to accept inbound peer connections.
// It should be called before starting the Node.
// The first listener is the primary listener (in NodeInfo)
func (n *Node) AddListener(l p2p.Listener) {
n.sw.AddListener(l)
@ -314,7 +388,7 @@ func (n *Node) ConfigureRPC() {
rpccore.SetConsensusState(n.consensusState)
rpccore.SetMempool(n.mempoolReactor.Mempool)
rpccore.SetSwitch(n.sw)
rpccore.SetPubKey(n.privValidator.PubKey)
rpccore.SetPubKey(n.privValidator.GetPubKey())
rpccore.SetGenesisDoc(n.genesisDoc)
rpccore.SetAddrBook(n.addrBook)
rpccore.SetProxyAppQuery(n.proxyApp.Query())
@ -360,39 +434,48 @@ func (n *Node) startRPC() ([]net.Listener, error) {
return listeners, nil
}
// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
return n.sw
}
// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
return n.blockStore
}
// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *consensus.ConsensusState {
return n.consensusState
}
// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *consensus.ConsensusReactor {
return n.consensusReactor
}
// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
return n.mempoolReactor
}
// EventSwitch returns the Node's EventSwitch.
func (n *Node) EventSwitch() types.EventSwitch {
return n.evsw
}
// XXX: for convenience
func (n *Node) PrivValidator() *types.PrivValidator {
// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
return n.privValidator
}
// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
return n.genesisDoc
}
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp
}
@ -442,22 +525,14 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
//------------------------------------------------------------------------------
// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() *p2p.NodeInfo {
return n.sw.NodeInfo()
}
// DialSeeds dials the given seeds on the Switch.
func (n *Node) DialSeeds(seeds []string) error {
return n.sw.DialSeeds(n.addrBook, seeds)
}
// Defaults to tcp
func ProtocolAndAddress(listenAddr string) (string, string) {
protocol, address := "tcp", listenAddr
parts := strings.SplitN(address, "://", 2)
if len(parts) == 2 {
protocol, address = parts[0], parts[1]
}
return protocol, address
}
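The defaulting behavior, now imported from tmlibs as ``cmn.ProtocolAndAddress``, in a quick sketch (not part of this diff):

```go
protocol, address := cmn.ProtocolAndAddress("0.0.0.0:46656")
// protocol == "tcp" (the default), address == "0.0.0.0:46656"

protocol, address = cmn.ProtocolAndAddress("unix:///tmp/tm.sock")
// protocol == "unix", address == "/tmp/tm.sock"
```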
//------------------------------------------------------------------------------

+ 6
- 2
node/node_test.go View File

@ -4,15 +4,19 @@ import (
"testing"
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config"
)
func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
// Create & start node
n := NewNodeDefault(config, log.TestingLogger())
n, err := DefaultNewNode(config, log.TestingLogger())
assert.NoError(t, err, "expected no err on DefaultNewNode")
n.Start()
t.Logf("Started node %v", n.sw.NodeInfo())


+ 7
- 4
p2p/listener.go View File

@ -87,7 +87,7 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
}
// Otherwise just use the local address...
if extAddr == nil {
extAddr = getNaiveExternalAddress(listenerPort)
extAddr = getNaiveExternalAddress(listenerPort, false, logger)
}
if extAddr == nil {
cmn.PanicCrisis("Could not determine external address!")
@ -197,7 +197,7 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
}
// TODO: use syscalls: http://pastebin.com/9exZG4rh
func getNaiveExternalAddress(port int) *NetAddress {
func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress {
addrs, err := net.InterfaceAddrs()
if err != nil {
cmn.PanicCrisis(cmn.Fmt("Could not fetch interface addresses: %v", err))
@ -209,10 +209,13 @@ func getNaiveExternalAddress(port int) *NetAddress {
continue
}
v4 := ipnet.IP.To4()
if v4 == nil || v4[0] == 127 {
if v4 == nil || (!settleForLocal && v4[0] == 127) {
continue
} // loopback
return NewNetAddressIPPort(ipnet.IP, uint16(port))
}
return nil
// try again, but settle for local
logger.Info("Node may not be connected to internet. Settling for local address")
return getNaiveExternalAddress(port, true, logger)
}

+ 71
- 33
p2p/peer.go View File

@ -12,12 +12,29 @@ import (
cmn "github.com/tendermint/tmlibs/common"
)
// Peer is an interface representing a peer connected on a reactor.
type Peer interface {
cmn.Service
Key() string
IsOutbound() bool
IsPersistent() bool
NodeInfo() *NodeInfo
Status() ConnectionStatus
Send(byte, interface{}) bool
TrySend(byte, interface{}) bool
Set(string, interface{})
Get(string) interface{}
}
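To see what the extraction buys, here is a hedged sketch of downstream code written against the ``Peer`` interface rather than the old concrete ``*Peer``, so tests can substitute a stub; ``greet`` is a hypothetical helper, not part of this diff.

```go
func greet(p Peer, logger log.Logger) {
    // Send returns false if the channel's send queue stays full past the
    // configured timeout; TrySend would instead fail immediately.
    if !p.Send(0x01, "hello") {
        logger.Info("could not greet peer", "key", p.Key())
    }
}
```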
// A peer can be marked as persistent, in which case you can use the
// Redial function to reconnect. Note that inbound peers can't be made
// persistent. They should be made persistent on the other end.
//
// Before using a peer, you will need to perform a handshake on connection.
type Peer struct {
type peer struct {
cmn.BaseService
outbound bool
@ -28,9 +45,9 @@ type Peer struct {
persistent bool
config *PeerConfig
*NodeInfo
Key string
Data *cmn.CMap // User data.
nodeInfo *NodeInfo
key string
Data *cmn.CMap // User data.
}
// PeerConfig is a Peer configuration.
@ -60,7 +77,7 @@ func DefaultPeerConfig() *PeerConfig {
}
func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
conn, err := dial(addr, config)
if err != nil {
@ -76,13 +93,13 @@ func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []
}
func newInboundPeer(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
return newPeerFromConnAndConfig(conn, false, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
}
func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
conn := rawConn
@ -104,7 +121,7 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[
}
// Key and NodeInfo are set after Handshake
p := &Peer{
p := &peer{
outbound: outbound,
conn: conn,
config: config,
@ -119,12 +136,12 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[
}
// CloseConn should be used when the peer was created, but never started.
func (p *Peer) CloseConn() {
func (p *peer) CloseConn() {
p.conn.Close()
}
// makePersistent marks the peer as persistent.
func (p *Peer) makePersistent() {
func (p *peer) makePersistent() {
if !p.outbound {
panic("inbound peers can't be made persistent")
}
@ -133,13 +150,13 @@ func (p *Peer) makePersistent() {
}
// IsPersistent returns true if the peer is persistent, false otherwise.
func (p *Peer) IsPersistent() bool {
func (p *peer) IsPersistent() bool {
return p.persistent
}
// HandshakeTimeout performs a handshake between a given node and the peer.
// NOTE: blocking
func (p *Peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
func (p *peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
// Set deadline for handshake so we don't block forever on conn.ReadFull
p.conn.SetDeadline(time.Now().Add(timeout))
@ -176,19 +193,19 @@ func (p *Peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) er
peerNodeInfo.RemoteAddr = p.Addr().String()
p.NodeInfo = peerNodeInfo
p.Key = peerNodeInfo.PubKey.KeyString()
p.nodeInfo = peerNodeInfo
p.key = peerNodeInfo.PubKey.KeyString()
return nil
}
// Addr returns peer's remote network address.
func (p *Peer) Addr() net.Addr {
func (p *peer) Addr() net.Addr {
return p.conn.RemoteAddr()
}
// PubKey returns peer's public key.
func (p *Peer) PubKey() crypto.PubKeyEd25519 {
func (p *peer) PubKey() crypto.PubKeyEd25519 {
if p.config.AuthEnc {
return p.conn.(*SecretConnection).RemotePubKey()
}
@ -199,31 +216,31 @@ func (p *Peer) PubKey() crypto.PubKeyEd25519 {
}
// OnStart implements BaseService.
func (p *Peer) OnStart() error {
func (p *peer) OnStart() error {
p.BaseService.OnStart()
_, err := p.mconn.Start()
return err
}
// OnStop implements BaseService.
func (p *Peer) OnStop() {
func (p *peer) OnStop() {
p.BaseService.OnStop()
p.mconn.Stop()
}
// Connection returns underlying MConnection.
func (p *Peer) Connection() *MConnection {
func (p *peer) Connection() *MConnection {
return p.mconn
}
// IsOutbound returns true if the connection is outbound, false otherwise.
func (p *Peer) IsOutbound() bool {
func (p *peer) IsOutbound() bool {
return p.outbound
}
// Send msg to the channel identified by chID byte. Returns false if the send
// queue is full after timeout, specified by MConnection.
func (p *Peer) Send(chID byte, msg interface{}) bool {
func (p *peer) Send(chID byte, msg interface{}) bool {
if !p.IsRunning() {
// see Switch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped.
@ -234,7 +251,7 @@ func (p *Peer) Send(chID byte, msg interface{}) bool {
// TrySend msg to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *Peer) TrySend(chID byte, msg interface{}) bool {
func (p *peer) TrySend(chID byte, msg interface{}) bool {
if !p.IsRunning() {
return false
}
@ -242,7 +259,7 @@ func (p *Peer) TrySend(chID byte, msg interface{}) bool {
}
// CanSend returns true if the send queue is not full, false otherwise.
func (p *Peer) CanSend(chID byte) bool {
func (p *peer) CanSend(chID byte) bool {
if !p.IsRunning() {
return false
}
@ -250,32 +267,53 @@ func (p *Peer) CanSend(chID byte) bool {
}
// WriteTo writes the peer's public key to w.
func (p *Peer) WriteTo(w io.Writer) (n int64, err error) {
func (p *peer) WriteTo(w io.Writer) (n int64, err error) {
var n_ int
wire.WriteString(p.Key, w, &n_, &err)
wire.WriteString(p.key, w, &n_, &err)
n += int64(n_)
return
}
// String representation.
func (p *Peer) String() string {
func (p *peer) String() string {
if p.outbound {
return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.Key[:12])
return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.key[:12])
}
return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.Key[:12])
return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.key[:12])
}
// Equals reports whether two peers represent the same node.
func (p *Peer) Equals(other *Peer) bool {
return p.Key == other.Key
func (p *peer) Equals(other Peer) bool {
return p.key == other.Key()
}
// Get the data for a given key.
func (p *Peer) Get(key string) interface{} {
func (p *peer) Get(key string) interface{} {
return p.Data.Get(key)
}
// Set sets the data for the given key.
func (p *peer) Set(key string, data interface{}) {
p.Data.Set(key, data)
}
// Key returns the peer's id key.
func (p *peer) Key() string {
return p.key
}
// NodeInfo returns a copy of the peer's NodeInfo.
func (p *peer) NodeInfo() *NodeInfo {
n := *p.nodeInfo // copy
return &n
}
// Status returns the peer's ConnectionStatus.
func (p *peer) Status() ConnectionStatus {
return p.mconn.Status()
}
func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
conn, err := addr.DialTimeout(config.DialTimeout * time.Second)
if err != nil {
@ -284,8 +322,8 @@ func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
return conn, nil
}
func createMConnection(conn net.Conn, p *Peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
onPeerError func(*Peer, interface{}), config *MConnConfig) *MConnection {
func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
onPeerError func(Peer, interface{}), config *MConnConfig) *MConnection {
onReceive := func(chID byte, msgBytes []byte) {
reactor := reactorsByCh[chID]


+ 29
- 23
p2p/peer_set.go View File

@ -7,8 +7,8 @@ import (
// IPeerSet has an (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
Has(key string) bool
Get(key string) *Peer
List() []*Peer
Get(key string) Peer
List() []Peer
Size() int
}
@ -19,26 +19,28 @@ type IPeerSet interface {
type PeerSet struct {
mtx sync.Mutex
lookup map[string]*peerSetItem
list []*Peer
list []Peer
}
type peerSetItem struct {
peer *Peer
peer Peer
index int
}
// NewPeerSet creates a new peerSet with an initial list capacity of 256 items.
func NewPeerSet() *PeerSet {
return &PeerSet{
lookup: make(map[string]*peerSetItem),
list: make([]*Peer, 0, 256),
list: make([]Peer, 0, 256),
}
}
// Returns false if peer with key (PubKeyEd25519) is already set
func (ps *PeerSet) Add(peer *Peer) error {
// Add adds the peer to the PeerSet.
// It returns ErrSwitchDuplicatePeer if the peer is already present.
func (ps *PeerSet) Add(peer Peer) error {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.lookup[peer.Key] != nil {
if ps.lookup[peer.Key()] != nil {
return ErrSwitchDuplicatePeer
}
@ -46,18 +48,21 @@ func (ps *PeerSet) Add(peer *Peer) error {
// Appending is safe even with other goroutines
// iterating over the ps.list slice.
ps.list = append(ps.list, peer)
ps.lookup[peer.Key] = &peerSetItem{peer, index}
ps.lookup[peer.Key()] = &peerSetItem{peer, index}
return nil
}
// Has returns true iff the PeerSet contains
// the peer referred to by this peerKey.
func (ps *PeerSet) Has(peerKey string) bool {
ps.mtx.Lock()
defer ps.mtx.Unlock()
_, ok := ps.lookup[peerKey]
ps.mtx.Unlock()
return ok
}
func (ps *PeerSet) Get(peerKey string) *Peer {
// Get looks up a peer by the provided peerKey.
func (ps *PeerSet) Get(peerKey string) Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item, ok := ps.lookup[peerKey]
@ -68,45 +73,46 @@ func (ps *PeerSet) Get(peerKey string) *Peer {
}
}
func (ps *PeerSet) Remove(peer *Peer) {
// Remove discards a peer by its Key, if the peer was previously added.
func (ps *PeerSet) Remove(peer Peer) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item := ps.lookup[peer.Key]
item := ps.lookup[peer.Key()]
if item == nil {
return
}
index := item.index
// Copy the list but without the last element.
// (we must copy because we're mutating the list)
newList := make([]*Peer, len(ps.list)-1)
// Create a new copy of the list but with one less item.
// (we must copy because we'll be mutating the list).
newList := make([]Peer, len(ps.list)-1)
copy(newList, ps.list)
// If it's the last peer, that's an easy special case.
if index == len(ps.list)-1 {
ps.list = newList
delete(ps.lookup, peer.Key)
delete(ps.lookup, peer.Key())
return
}
// Move the last item from ps.list to "index" in list.
// Replace the popped item with the last item in the old list.
lastPeer := ps.list[len(ps.list)-1]
lastPeerKey := lastPeer.Key
lastPeerKey := lastPeer.Key()
lastPeerItem := ps.lookup[lastPeerKey]
newList[index] = lastPeer
lastPeerItem.index = index
ps.list = newList
delete(ps.lookup, peer.Key)
delete(ps.lookup, peer.Key())
}
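The removal above copies the list minus one slot, then moves the last element into the vacated index and patches its lookup entry, keeping Remove O(1) while old list snapshots stay safe for concurrent iteration. A standalone sketch of the same idea, outside Tendermint:

```go
// removeAt deletes key from a slice+index-map pair in O(1) by
// swapping the final element into the vacated position.
func removeAt(list []string, lookup map[string]int, key string) []string {
    index, ok := lookup[key]
    if !ok {
        return list
    }
    newList := make([]string, len(list)-1)
    copy(newList, list) // copies everything except the final element
    if index < len(list)-1 {
        last := list[len(list)-1]
        newList[index] = last // last element fills the vacated slot
        lookup[last] = index  // and its index is updated in the map
    }
    delete(lookup, key)
    return newList
}
```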
// Size returns the number of unique items in the peerSet.
func (ps *PeerSet) Size() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return len(ps.list)
}
// threadsafe list of peers.
func (ps *PeerSet) List() []*Peer {
// List returns the threadsafe list of peers.
func (ps *PeerSet) List() []Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.list


+ 99
- 19
p2p/peer_set_test.go View File

@ -2,47 +2,69 @@ package p2p
import (
"math/rand"
"sync"
"testing"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tmlibs/common"
)
// Returns an empty dummy peer
func randPeer() *Peer {
return &Peer{
Key: cmn.RandStr(12),
NodeInfo: &NodeInfo{
func randPeer() *peer {
return &peer{
key: cmn.RandStr(12),
nodeInfo: &NodeInfo{
RemoteAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
},
}
}
func TestAddRemoveOne(t *testing.T) {
func TestPeerSetAddRemoveOne(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := randPeer()
err := peerSet.Add(peer)
if err != nil {
t.Errorf("Failed to add new peer")
var peerList []Peer
for i := 0; i < 5; i++ {
p := randPeer()
peerSet.Add(p)
peerList = append(peerList, p)
}
if peerSet.Size() != 1 {
t.Errorf("Failed to add new peer and increment size")
n := len(peerList)
// 1. Test removing from the front
for i, peerAtFront := range peerList {
peerSet.Remove(peerAtFront)
wantSize := n - i - 1
for j := 0; j < 2; j++ {
assert.Equal(t, false, peerSet.Has(peerAtFront.Key()), "#%d Run #%d: failed to remove peer", i, j)
assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
// Test the route of removing the now non-existent element
peerSet.Remove(peerAtFront)
}
}
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
t.Errorf("Failed to remove peer")
// 2. Next we are testing removing the peer at the end
// a) Replenish the peerSet
for _, peer := range peerList {
peerSet.Add(peer)
}
if peerSet.Size() != 0 {
t.Errorf("Failed to remove peer and decrement size")
// b) In reverse, remove each element
for i := n - 1; i >= 0; i-- {
peerAtEnd := peerList[i]
peerSet.Remove(peerAtEnd)
assert.Equal(t, false, peerSet.Has(peerAtEnd.Key()), "#%d: failed to remove item at end", i)
assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
}
}
func TestAddRemoveMany(t *testing.T) {
func TestPeerSetAddRemoveMany(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peers := []*Peer{}
peers := []Peer{}
N := 100
for i := 0; i < N; i++ {
peer := randPeer()
@ -57,7 +79,7 @@ func TestAddRemoveMany(t *testing.T) {
for i, peer := range peers {
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
if peerSet.Has(peer.Key()) {
t.Errorf("Failed to remove peer")
}
if peerSet.Size() != len(peers)-i-1 {
@ -65,3 +87,61 @@ func TestAddRemoveMany(t *testing.T) {
}
}
}
func TestPeerSetAddDuplicate(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := randPeer()
n := 20
errsChan := make(chan error)
// Add the same peer asynchronously to test the
// concurrent guarantees of our APIs, and
// our expectation in the end is that only
// one addition succeeded, but the rest are
// instances of ErrSwitchDuplicatePeer.
for i := 0; i < n; i++ {
go func() {
errsChan <- peerSet.Add(peer)
}()
}
// Now collect and tally the results
errsTally := make(map[error]int)
for i := 0; i < n; i++ {
err := <-errsChan
errsTally[err] += 1
}
// Our next procedure is to ensure that only one addition
// succeeded and that the rest are each ErrSwitchDuplicatePeer.
wantErrCount, gotErrCount := n-1, errsTally[ErrSwitchDuplicatePeer]
assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
wantNilErrCount, gotNilErrCount := 1, errsTally[nil]
assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
}
func TestPeerSetGet(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := randPeer()
assert.Nil(t, peerSet.Get(peer.Key()), "expecting a nil lookup, before .Add")
if err := peerSet.Add(peer); err != nil {
t.Fatalf("Failed to add new peer: %v", err)
}
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
// Add them asynchronously to test the
// concurrent guarantees of our APIs.
wg.Add(1)
go func(i int) {
defer wg.Done()
got, want := peerSet.Get(peer.Key()), peer
assert.Equal(t, got, want, "#%d: got=%v want=%v", i, got, want)
}(i)
}
wg.Wait()
}

+ 3
- 3
p2p/peer_test.go View File

@ -76,13 +76,13 @@ func TestPeerSend(t *testing.T) {
assert.True(p.Send(0x01, "Asylum"))
}
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*Peer, error) {
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
chDescs := []*ChannelDescriptor{
&ChannelDescriptor{ID: 0x01, Priority: 1},
}
reactorsByCh := map[byte]Reactor{0x01: NewTestReactor(chDescs, true)}
pk := crypto.GenPrivKeyEd25519()
p, err := newOutboundPeer(addr, reactorsByCh, chDescs, func(p *Peer, r interface{}) {}, pk, config)
p, err := newOutboundPeer(addr, reactorsByCh, chDescs, func(p Peer, r interface{}) {}, pk, config)
if err != nil {
return nil, err
}
@ -133,7 +133,7 @@ func (p *remotePeer) accept(l net.Listener) {
if err != nil {
golog.Fatalf("Failed to accept conn: %+v", err)
}
peer, err := newInboundPeer(conn, make(map[byte]Reactor), make([]*ChannelDescriptor, 0), func(p *Peer, r interface{}) {}, p.PrivKey, p.Config)
peer, err := newInboundPeer(conn, make(map[byte]Reactor), make([]*ChannelDescriptor, 0), func(p Peer, r interface{}) {}, p.PrivKey, p.Config)
if err != nil {
golog.Fatalf("Failed to create a peer: %+v", err)
}


+ 9
- 9
p2p/pex_reactor.go View File

@ -92,7 +92,7 @@ func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
// AddPeer implements Reactor by adding peer to the address book (if inbound)
// or by requesting more addresses (if outbound).
func (r *PEXReactor) AddPeer(p *Peer) {
func (r *PEXReactor) AddPeer(p Peer) {
if p.IsOutbound() {
// For outbound peers, the address is already in the books.
// Either it was added in DialSeeds or when we
@ -101,10 +101,10 @@ func (r *PEXReactor) AddPeer(p *Peer) {
r.RequestPEX(p)
}
} else { // For inbound connections, the peer is its own source
addr, err := NewNetAddressString(p.ListenAddr)
addr, err := NewNetAddressString(p.NodeInfo().ListenAddr)
if err != nil {
// this should never happen
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.ListenAddr, "err", err)
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.NodeInfo().ListenAddr, "err", err)
return
}
r.book.AddAddress(addr, addr)
@ -112,15 +112,15 @@ func (r *PEXReactor) AddPeer(p *Peer) {
}
// RemovePeer implements Reactor.
func (r *PEXReactor) RemovePeer(p *Peer, reason interface{}) {
func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
// If we aren't keeping track of local temp data for each peer here, then we
// don't have to do anything.
}
// Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
srcAddr := src.Connection().RemoteAddress
srcAddrStr := srcAddr.String()
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
srcAddrStr := src.NodeInfo().RemoteAddr
srcAddr, _ := NewNetAddressString(srcAddrStr)
r.IncrementMsgCountForPeer(srcAddrStr)
if r.ReachedMaxMsgCountForPeer(srcAddrStr) {
@ -154,12 +154,12 @@ func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
}
// RequestPEX asks peer for more addresses.
func (r *PEXReactor) RequestPEX(p *Peer) {
func (r *PEXReactor) RequestPEX(p Peer) {
p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
}
// SendAddrs sends addrs to the peer.
func (r *PEXReactor) SendAddrs(p *Peer, addrs []*NetAddress) {
func (r *PEXReactor) SendAddrs(p Peer, addrs []*NetAddress) {
p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
}


+ 7
- 6
p2p/pex_reactor_test.go View File

@ -129,7 +129,7 @@ func TestPEXReactorReceive(t *testing.T) {
peer := createRandomPeer(false)
size := book.Size()
netAddr, _ := NewNetAddressString(peer.ListenAddr)
netAddr, _ := NewNetAddressString(peer.NodeInfo().ListenAddr)
addrs := []*NetAddress{netAddr}
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
r.Receive(PexChannel, peer, msg)
@ -159,16 +159,17 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
r.Receive(PexChannel, peer, msg)
}
assert.True(r.ReachedMaxMsgCountForPeer(peer.ListenAddr))
assert.True(r.ReachedMaxMsgCountForPeer(peer.NodeInfo().ListenAddr))
}
func createRandomPeer(outbound bool) *Peer {
func createRandomPeer(outbound bool) *peer {
addr := cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
netAddr, _ := NewNetAddressString(addr)
p := &Peer{
Key: cmn.RandStr(12),
NodeInfo: &NodeInfo{
p := &peer{
key: cmn.RandStr(12),
nodeInfo: &NodeInfo{
ListenAddr: addr,
RemoteAddr: netAddr.String(),
},
outbound: outbound,
mconn: &MConnection{RemoteAddress: netAddr},


+ 26
- 25
p2p/switch.go View File

@ -22,9 +22,9 @@ type Reactor interface {
SetSwitch(*Switch)
GetChannels() []*ChannelDescriptor
AddPeer(peer *Peer)
RemovePeer(peer *Peer, reason interface{})
Receive(chID byte, peer *Peer, msgBytes []byte)
AddPeer(peer Peer)
RemovePeer(peer Peer, reason interface{})
Receive(chID byte, peer Peer, msgBytes []byte)
}
//--------------------------------------
@ -44,10 +44,10 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw
}
func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil }
func (_ *BaseReactor) AddPeer(peer *Peer) {}
func (_ *BaseReactor) RemovePeer(peer *Peer, reason interface{}) {}
func (_ *BaseReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {}
func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil }
func (_ *BaseReactor) AddPeer(peer Peer) {}
func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
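With BaseReactor now supplying no-op defaults against the ``Peer`` interface, a custom reactor overrides only what it needs. A hedged sketch; ``EchoReactor`` is illustrative, not part of this diff.

```go
type EchoReactor struct {
    BaseReactor // provides no-op GetChannels/AddPeer/RemovePeer/Receive
}

// Receive overrides the default no-op and echoes every message back.
func (e *EchoReactor) Receive(chID byte, src Peer, msgBytes []byte) {
    e.Logger.Debug("echoing", "peer", src.Key(), "chID", chID)
    src.TrySend(chID, msgBytes)
}
```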
//-----------------------------------------------------------------------------
@ -209,11 +209,12 @@ func (sw *Switch) OnStop() {
}
}
// AddPeer checks the given peer's validity, performs a handshake, and adds the peer to the switch
// addPeer checks the given peer's validity, performs a handshake, and adds the peer to the switch
// and to all registered reactors.
// NOTE: This performs a blocking handshake before the peer is added.
// CONTRACT: If error is returned, peer is nil, and conn is immediately closed.
func (sw *Switch) AddPeer(peer *Peer) error {
func (sw *Switch) addPeer(peer *peer) error {
if err := sw.FilterConnByAddr(peer.Addr()); err != nil {
return err
}
@ -232,12 +233,12 @@ func (sw *Switch) AddPeer(peer *Peer) error {
}
// Check version, chain id
if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo); err != nil {
if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo()); err != nil {
return err
}
// Check for duplicate peer
if sw.peers.Has(peer.Key) {
if sw.peers.Has(peer.Key()) {
return ErrSwitchDuplicatePeer
}
@ -285,7 +286,7 @@ func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) {
sw.filterConnByPubKey = f
}
func (sw *Switch) startInitPeer(peer *Peer) {
func (sw *Switch) startInitPeer(peer *peer) {
peer.Start() // spawn send/recv routines
for _, reactor := range sw.reactors {
reactor.AddPeer(peer)
@ -335,9 +336,9 @@ func (sw *Switch) dialSeed(addr *NetAddress) {
}
}
// DialPeerWithAddress dials the given peer and runs sw.AddPeer if it connects successfully.
// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects successfully.
// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails.
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer, error) {
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (Peer, error) {
sw.dialing.Set(addr.IP.String(), addr)
defer sw.dialing.Delete(addr.IP.String())
@ -351,7 +352,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer,
if persistent {
peer.makePersistent()
}
err = sw.AddPeer(peer)
err = sw.addPeer(peer)
if err != nil {
sw.Logger.Error("Failed to add peer", "address", addr, "err", err)
peer.CloseConn()
@ -375,7 +376,7 @@ func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
successChan := make(chan bool, len(sw.peers.List()))
sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg)
for _, peer := range sw.peers.List() {
go func(peer *Peer) {
go func(peer Peer) {
success := peer.Send(chID, msg)
successChan <- success
}(peer)
@ -387,7 +388,7 @@ func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
peers := sw.peers.List()
for _, peer := range peers {
if peer.outbound {
if peer.IsOutbound() {
outbound++
} else {
inbound++
@ -405,8 +406,8 @@ func (sw *Switch) Peers() IPeerSet {
// StopPeerForError disconnects from a peer due to external error.
// If the peer is persistent, it will attempt to reconnect.
// TODO: make record depending on reason.
func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
addr := NewNetAddress(peer.Addr())
func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
addr, _ := NewNetAddressString(peer.NodeInfo().RemoteAddr)
sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason)
sw.stopAndRemovePeer(peer, reason)
@ -438,12 +439,12 @@ func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
// StopPeerGracefully disconnects from a peer gracefully.
// TODO: handle graceful disconnects.
func (sw *Switch) StopPeerGracefully(peer *Peer) {
func (sw *Switch) StopPeerGracefully(peer Peer) {
sw.Logger.Info("Stopping peer gracefully")
sw.stopAndRemovePeer(peer, nil)
}
func (sw *Switch) stopAndRemovePeer(peer *Peer, reason interface{}) {
func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
sw.peers.Remove(peer)
peer.Stop()
for _, reactor := range sw.reactors {
@ -483,11 +484,11 @@ func (sw *Switch) listenerRoutine(l Listener) {
//-----------------------------------------------------------------------------
type SwitchEventNewPeer struct {
Peer *Peer
Peer Peer
}
type SwitchEventDonePeer struct {
Peer *Peer
Peer Peer
Error interface{}
}
@ -581,7 +582,7 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
return err
}
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
if err = sw.AddPeer(peer); err != nil {
if err = sw.addPeer(peer); err != nil {
conn.Close()
return err
}
@ -596,7 +597,7 @@ func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConf
return err
}
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
if err = sw.AddPeer(peer); err != nil {
if err = sw.addPeer(peer); err != nil {
conn.Close()
return err
}
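
Reactor implementations now program against the `Peer` interface rather than `*Peer`. A hedged sketch of a custom reactor under the new signatures — the reactor name, channel ID, and echo behavior are illustrative, not part of this diff:

```go
package echo

import "github.com/tendermint/tendermint/p2p"

// EchoReactor is a hypothetical reactor compiled against the new
// interface-based signatures (AddPeer/RemovePeer/Receive take p2p.Peer).
type EchoReactor struct {
	p2p.BaseReactor
}

func NewEchoReactor() *EchoReactor {
	r := &EchoReactor{}
	r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
	return r
}

func (r *EchoReactor) GetChannels() []*p2p.ChannelDescriptor {
	// 0x42 is an arbitrary example channel ID.
	return []*p2p.ChannelDescriptor{{ID: 0x42, Priority: 1}}
}

func (r *EchoReactor) AddPeer(peer p2p.Peer)                        {}
func (r *EchoReactor) RemovePeer(peer p2p.Peer, reason interface{}) {}

func (r *EchoReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	// Send is part of the Peer interface (see Broadcast above).
	peer.Send(chID, msgBytes)
}
```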


+ 8
- 8
p2p/switch_test.go View File

@ -37,8 +37,8 @@ type TestReactor struct {
mtx sync.Mutex
channels []*ChannelDescriptor
peersAdded []*Peer
peersRemoved []*Peer
peersAdded []Peer
peersRemoved []Peer
logMessages bool
msgsCounter int
msgsReceived map[byte][]PeerMessage
@ -59,24 +59,24 @@ func (tr *TestReactor) GetChannels() []*ChannelDescriptor {
return tr.channels
}
func (tr *TestReactor) AddPeer(peer *Peer) {
func (tr *TestReactor) AddPeer(peer Peer) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersAdded = append(tr.peersAdded, peer)
}
func (tr *TestReactor) RemovePeer(peer *Peer, reason interface{}) {
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersRemoved = append(tr.peersRemoved, peer)
}
func (tr *TestReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
if tr.logMessages {
tr.mtx.Lock()
defer tr.mtx.Unlock()
//fmt.Printf("Received: %X, %X\n", chID, msgBytes)
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key, msgBytes, tr.msgsCounter})
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key(), msgBytes, tr.msgsCounter})
tr.msgsCounter++
}
}
@ -246,7 +246,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, DefaultPeerConfig())
require.Nil(err)
err = sw.AddPeer(peer)
err = sw.addPeer(peer)
require.Nil(err)
// simulate failure by closing connection
@ -273,7 +273,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, DefaultPeerConfig())
peer.makePersistent()
require.Nil(err)
err = sw.AddPeer(peer)
err = sw.addPeer(peer)
require.Nil(err)
// simulate failure by closing connection


+ 10
- 10
proxy/app_conn.go View File

@ -12,9 +12,9 @@ type AppConnConsensus interface {
SetResponseCallback(abcicli.Callback)
Error() error
InitChainSync(validators []*types.Validator) (err error)
InitChainSync(types.RequestInitChain) (err error)
BeginBlockSync(hash []byte, header *types.Header) (err error)
BeginBlockSync(types.RequestBeginBlock) (err error)
DeliverTxAsync(tx []byte) *abcicli.ReqRes
EndBlockSync(height uint64) (types.ResponseEndBlock, error)
CommitSync() (res types.Result)
@ -34,8 +34,8 @@ type AppConnQuery interface {
Error() error
EchoSync(string) (res types.Result)
InfoSync() (resInfo types.ResponseInfo, err error)
QuerySync(reqQuery types.RequestQuery) (resQuery types.ResponseQuery, err error)
InfoSync(types.RequestInfo) (types.ResponseInfo, error)
QuerySync(types.RequestQuery) (types.ResponseQuery, error)
// SetOptionSync(key string, value string) (res types.Result)
}
@ -61,12 +61,12 @@ func (app *appConnConsensus) Error() error {
return app.appConn.Error()
}
func (app *appConnConsensus) InitChainSync(validators []*types.Validator) (err error) {
return app.appConn.InitChainSync(validators)
func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (err error) {
return app.appConn.InitChainSync(req)
}
func (app *appConnConsensus) BeginBlockSync(hash []byte, header *types.Header) (err error) {
return app.appConn.BeginBlockSync(hash, header)
func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (err error) {
return app.appConn.BeginBlockSync(req)
}
func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes {
@ -135,8 +135,8 @@ func (app *appConnQuery) EchoSync(msg string) (res types.Result) {
return app.appConn.EchoSync(msg)
}
func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
func (app *appConnQuery) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) {
return app.appConn.InfoSync(req)
}
func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (types.ResponseQuery, error) {
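
Callers of the consensus and query connections now pass ABCI request structs instead of positional arguments. A hedged sketch of the new call shapes — the variable names and the request-struct field names are assumptions inferred from the old parameters, not confirmed by this diff:

```go
// appConnCons / appConnQuery are hypothetical AppConnConsensus / AppConnQuery values.
if err := appConnCons.InitChainSync(types.RequestInitChain{Validators: validators}); err != nil {
	return err
}
if err := appConnCons.BeginBlockSync(types.RequestBeginBlock{Hash: hash, Header: header}); err != nil {
	return err
}

// InfoSync now takes a RequestInfo; rpc/core/abci.go below passes the node version.
resInfo, err := appConnQuery.InfoSync(types.RequestInfo{Version: version.Version})
```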


+ 5
- 5
proxy/app_conn_test.go View File

@ -17,7 +17,7 @@ import (
type AppConnTest interface {
EchoAsync(string) *abcicli.ReqRes
FlushSync() error
InfoSync() (types.ResponseInfo, error)
InfoSync(types.RequestInfo) (types.ResponseInfo, error)
}
type appConnTest struct {
@ -36,8 +36,8 @@ func (app *appConnTest) FlushSync() error {
return app.appConn.FlushSync()
}
func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
func (app *appConnTest) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) {
return app.appConn.InfoSync(req)
}
//----------------------------------------
@ -109,7 +109,7 @@ func BenchmarkEcho(b *testing.B) {
proxy.FlushSync()
b.StopTimer()
// info := proxy.InfoSync()
// info := proxy.InfoSync(types.RequestInfo{""})
//b.Log("N: ", b.N, info)
}
@ -138,7 +138,7 @@ func TestInfo(t *testing.T) {
proxy := NewAppConnTest(cli)
t.Log("Connected")
resInfo, err := proxy.InfoSync()
resInfo, err := proxy.InfoSync(types.RequestInfo{""})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}


+ 12
- 3
rpc/client/event_test.go View File

@ -5,11 +5,20 @@ import (
"time"
"github.com/stretchr/testify/require"
merktest "github.com/tendermint/merkleeyes/testutil"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
)
// MakeTxKV returns a text transaction, along with the expected key and value
func MakeTxKV() ([]byte, []byte, []byte) {
k := []byte(cmn.RandStr(8))
v := []byte(cmn.RandStr(8))
return k, v, append(k, append([]byte("="), v...)...)
}
func TestHeaderEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
@ -76,7 +85,7 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
}
// make the tx
_, _, tx := merktest.MakeTxKV()
_, _, tx := MakeTxKV()
evtTyp := types.EventStringTx(types.Tx(tx))
// send async
@ -109,7 +118,7 @@ func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
}
// make the tx
_, _, tx := merktest.MakeTxKV()
_, _, tx := MakeTxKV()
evtTyp := types.EventStringTx(types.Tx(tx))
// send async
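
For reference, the new MakeTxKV helper encodes the pair as a `key=value` transaction; a quick illustration (the random strings shown are made up):

```go
k, v, tx := MakeTxKV()
// e.g. k  = []byte("aZ3xQ9Lm")          // random 8-char string
//      v  = []byte("Pb71NtRs")          // random 8-char string
//      tx = []byte("aZ3xQ9Lm=Pb71NtRs") // k + "=" + v
```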


+ 4
- 4
rpc/client/httpclient.go View File

@ -143,7 +143,7 @@ func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
return result, nil
}
func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
func (c *HTTP) Block(height *int) (*ctypes.ResultBlock, error) {
result := new(ctypes.ResultBlock)
_, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result)
if err != nil {
@ -152,7 +152,7 @@ func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
return result, nil
}
func (c *HTTP) Commit(height int) (*ctypes.ResultCommit, error) {
func (c *HTTP) Commit(height *int) (*ctypes.ResultCommit, error) {
result := new(ctypes.ResultCommit)
_, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result)
if err != nil {
@ -174,9 +174,9 @@ func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
return result, nil
}
func (c *HTTP) Validators() (*ctypes.ResultValidators, error) {
func (c *HTTP) Validators(height *int) (*ctypes.ResultValidators, error) {
result := new(ctypes.ResultValidators)
_, err := c.rpc.Call("validators", map[string]interface{}{}, result)
_, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result)
if err != nil {
return nil, errors.Wrap(err, "Validators")
}
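
Heights are now passed as `*int`, with `nil` meaning "latest". A hedged usage sketch against the HTTP client (the endpoint address is an example):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")

	// nil height resolves to the latest block.
	latest, err := c.Block(nil)
	fmt.Println(latest, err)

	// Concrete heights are passed by address.
	h := 10
	block, _ := c.Block(&h)
	commit, _ := c.Commit(&h)
	vals, _ := c.Validators(&h)
	fmt.Println(block, commit, vals)
}
```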


+ 3
- 3
rpc/client/interface.go View File

@ -42,9 +42,9 @@ type ABCIClient interface {
// SignClient groups together the interfaces need to get valid
// signatures and prove anything about the chain
type SignClient interface {
Block(height int) (*ctypes.ResultBlock, error)
Commit(height int) (*ctypes.ResultCommit, error)
Validators() (*ctypes.ResultValidators, error)
Block(height *int) (*ctypes.ResultBlock, error)
Commit(height *int) (*ctypes.ResultCommit, error)
Validators(height *int) (*ctypes.ResultValidators, error)
Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
}


+ 4
- 4
rpc/client/localclient.go View File

@ -93,16 +93,16 @@ func (c Local) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Local) Block(height int) (*ctypes.ResultBlock, error) {
func (c Local) Block(height *int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Local) Commit(height int) (*ctypes.ResultCommit, error) {
func (c Local) Commit(height *int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Local) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
func (c Local) Validators(height *int) (*ctypes.ResultValidators, error) {
return core.Validators(height)
}
func (c Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {


+ 3
- 3
rpc/client/main_test.go View File

@ -4,7 +4,7 @@ import (
"os"
"testing"
meapp "github.com/tendermint/merkleeyes/app"
"github.com/tendermint/abci/example/dummy"
nm "github.com/tendermint/tendermint/node"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
@ -12,8 +12,8 @@ import (
var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (and merkleeyes) in the background to test against
app := meapp.NewMerkleEyesApp("", 100)
// start a tendermint node (and dummy) in the background to test against
app := dummy.NewDummyApplication()
node = rpctest.StartTendermint(app)
code := m.Run()


+ 2
- 1
rpc/client/mock/abci.go View File

@ -6,6 +6,7 @@ import (
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
// ABCIApp will send all abci related request to the named app,
@ -20,7 +21,7 @@ func (a ABCIApp) _assertABCIClient() client.ABCIClient {
}
func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return &ctypes.ResultABCIInfo{a.App.Info()}, nil
return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{version.Version})}, nil
}
func (a ABCIApp) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) {


+ 4
- 4
rpc/client/mock/client.go View File

@ -116,14 +116,14 @@ func (c Client) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Client) Block(height int) (*ctypes.ResultBlock, error) {
func (c Client) Block(height *int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Client) Commit(height int) (*ctypes.ResultCommit, error) {
func (c Client) Commit(height *int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Client) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
func (c Client) Validators(height *int) (*ctypes.ResultValidators, error) {
return core.Validators(height)
}

+ 13
- 12
rpc/client/rpc_test.go View File

@ -7,7 +7,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/merkleeyes/iavl"
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
@ -49,7 +48,7 @@ func TestInfo(t *testing.T) {
require.Nil(t, err, "%d: %+v", i, err)
// TODO: this is not correct - fix merkleeyes!
// assert.EqualValues(t, status.LatestBlockHeight, info.Response.LastBlockHeight)
assert.True(t, strings.HasPrefix(info.Response.Data, "size"))
assert.True(t, strings.Contains(info.Response.Data, "size"))
}
}
@ -87,13 +86,13 @@ func TestGenesisAndValidators(t *testing.T) {
gval := gen.Genesis.Validators[0]
// get the current validators
vals, err := c.Validators()
vals, err := c.Validators(nil)
require.Nil(t, err, "%d: %+v", i, err)
require.Equal(t, 1, len(vals.Validators))
val := vals.Validators[0]
// make sure the current set is also the genesis set
assert.Equal(t, gval.Amount, val.VotingPower)
assert.Equal(t, gval.Power, val.VotingPower)
assert.Equal(t, gval.PubKey, val.PubKey)
}
}
@ -110,11 +109,12 @@ func TestAppCalls(t *testing.T) {
sh := s.LatestBlockHeight
// look for the future
_, err = c.Block(sh + 2)
h := sh + 2
_, err = c.Block(&h)
assert.NotNil(err) // no block yet
// write something
k, v, tx := merktest.MakeTxKV()
k, v, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.DeliverTx.Code.IsOK())
@ -137,7 +137,7 @@ func TestAppCalls(t *testing.T) {
assert.EqualValues(tx, ptx.Tx)
// and we can even check the block is added
block, err := c.Block(apph)
block, err := c.Block(&apph)
require.Nil(err, "%d: %+v", i, err)
appHash := block.BlockMeta.Header.AppHash
assert.True(len(appHash) > 0)
@ -158,14 +158,15 @@ func TestAppCalls(t *testing.T) {
}
// and get the corresponding commit with the same apphash
commit, err := c.Commit(apph)
commit, err := c.Commit(&apph)
require.Nil(err, "%d: %+v", i, err)
cappHash := commit.Header.AppHash
assert.Equal(appHash, cappHash)
assert.NotNil(commit.Commit)
// compare the commits (note Commit(2) has commit from Block(3))
commit2, err := c.Commit(apph - 1)
h = apph - 1
commit2, err := c.Commit(&h)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(block.Block.LastCommit, commit2.Commit)
@ -191,7 +192,7 @@ func TestBroadcastTxSync(t *testing.T) {
initMempoolSize := mempool.Size()
for i, c := range GetClients() {
_, _, tx := merktest.MakeTxKV()
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxSync(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.Code.IsOK())
@ -209,7 +210,7 @@ func TestBroadcastTxCommit(t *testing.T) {
mempool := node.MempoolReactor().Mempool
for i, c := range GetClients() {
_, _, tx := merktest.MakeTxKV()
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.CheckTx.Code.IsOK())
@ -224,7 +225,7 @@ func TestTx(t *testing.T) {
// first we broadcast a tx
c := getHTTPClient()
_, _, tx := merktest.MakeTxKV()
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%+v", err)


+ 2
- 1
rpc/core/abci.go View File

@ -4,6 +4,7 @@ import (
abci "github.com/tendermint/abci/types"
data "github.com/tendermint/go-wire/data"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/version"
)
// Query the application for some information.
@ -86,7 +87,7 @@ func ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuer
// }
// ```
func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
resInfo, err := proxyAppQuery.InfoSync()
resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{version.Version})
if err != nil {
return nil, err
}


+ 22
- 4
rpc/core/blocks.go View File

@ -85,6 +85,7 @@ func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, err
}
// Get block at a given height.
// If no height is provided, it will fetch the latest block.
//
// ```shell
// curl 'localhost:46657/block?height=10'
@ -183,8 +184,16 @@ func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, err
// "jsonrpc": "2.0"
// }
// ```
func Block(height int) (*ctypes.ResultBlock, error) {
if height == 0 {
func Block(heightPtr *int) (*ctypes.ResultBlock, error) {
if heightPtr == nil {
height := blockStore.Height()
blockMeta := blockStore.LoadBlockMeta(height)
block := blockStore.LoadBlock(height)
return &ctypes.ResultBlock{blockMeta, block}, nil
}
height := *heightPtr
if height <= 0 {
return nil, fmt.Errorf("Height must be greater than 0")
}
if height > blockStore.Height() {
@ -197,6 +206,7 @@ func Block(height int) (*ctypes.ResultBlock, error) {
}
// Get block commit at a given height.
// If no height is provided, it will fetch the commit for the latest block.
//
// ```shell
// curl 'localhost:46657/commit?height=11'
@ -265,8 +275,16 @@ func Block(height int) (*ctypes.ResultBlock, error) {
// "jsonrpc": "2.0"
// }
// ```
func Commit(height int) (*ctypes.ResultCommit, error) {
if height == 0 {
func Commit(heightPtr *int) (*ctypes.ResultCommit, error) {
if heightPtr == nil {
height := blockStore.Height()
header := blockStore.LoadBlockMeta(height).Header
commit := blockStore.LoadSeenCommit(height)
return &ctypes.ResultCommit{header, commit, false}, nil
}
height := *heightPtr
if height <= 0 {
return nil, fmt.Errorf("Height must be greater than 0")
}
storeHeight := blockStore.Height()
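
Block, Commit, and Validators now share the same nil-means-latest convention, so the repeated check could be factored into a helper along these lines (the helper is hypothetical, not part of this diff):

```go
// getHeight resolves a *int height: nil means the latest height,
// and explicit heights must be positive.
func getHeight(latestHeight int, heightPtr *int) (int, error) {
	if heightPtr == nil {
		return latestHeight, nil
	}
	if *heightPtr <= 0 {
		return 0, fmt.Errorf("Height must be greater than 0")
	}
	return *heightPtr, nil
}
```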


+ 17
- 6
rpc/core/consensus.go View File

@ -7,7 +7,8 @@ import (
"github.com/tendermint/tendermint/types"
)
// Get current validators set along with a block height.
// Get the validator set at the given block height.
// If no height is provided, it will fetch the current validator set.
//
// ```shell
// curl 'localhost:46657/validators'
@ -41,9 +42,19 @@ import (
// "jsonrpc": "2.0"
// }
// ```
func Validators() (*ctypes.ResultValidators, error) {
blockHeight, validators := consensusState.GetValidators()
return &ctypes.ResultValidators{blockHeight, validators}, nil
func Validators(heightPtr *int) (*ctypes.ResultValidators, error) {
if heightPtr == nil {
blockHeight, validators := consensusState.GetValidators()
return &ctypes.ResultValidators{blockHeight, validators}, nil
}
height := *heightPtr
state := consensusState.GetState()
validators, err := state.LoadValidators(height)
if err != nil {
return nil, err
}
return &ctypes.ResultValidators{height, validators.Validators}, nil
}
// Dump consensus state.
@ -75,9 +86,9 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
peerRoundStates := []string{}
for _, peer := range p2pSwitch.Peers().List() {
// TODO: clean this up?
peerState := peer.Data.Get(types.PeerStateKey).(*cm.PeerState)
peerState := peer.Get(types.PeerStateKey).(*cm.PeerState)
peerRoundState := peerState.GetRoundState()
peerRoundStateStr := peer.Key + ":" + string(wire.JSONBytes(peerRoundState))
peerRoundStateStr := peer.Key() + ":" + string(wire.JSONBytes(peerRoundState))
peerRoundStates = append(peerRoundStates, peerRoundStateStr)
}
return &ctypes.ResultDumpConsensusState{roundState.String(), peerRoundStates}, nil


+ 2
- 2
rpc/core/events.go View File

@ -2,7 +2,7 @@ package core
import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/rpc/lib/types"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tendermint/types"
)
@ -39,7 +39,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, event string) (*ctypes.ResultSubscri
// NOTE: EventSwitch callbacks must be nonblocking
// NOTE: RPCResponses of subscribed events have id suffix "#event"
tmResult := &ctypes.ResultEvent{event, msg}
wsCtx.TryWriteRPCResponse(rpctypes.NewRPCResponse(wsCtx.Request.ID+"#event", tmResult, ""))
wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Request.ID+"#event", tmResult))
})
return &ctypes.ResultSubscribe{}, nil
}


+ 3
- 3
rpc/core/net.go View File

@ -42,9 +42,9 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
peers := []ctypes.Peer{}
for _, peer := range p2pSwitch.Peers().List() {
peers = append(peers, ctypes.Peer{
NodeInfo: *peer.NodeInfo,
NodeInfo: *peer.NodeInfo(),
IsOutbound: peer.IsOutbound(),
ConnectionStatus: peer.Connection().Status(),
ConnectionStatus: peer.Status(),
})
}
return &ctypes.ResultNetInfo{
@ -90,7 +90,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
// "validators": [
// {
// "name": "",
// "amount": 10,
// "power": 10,
// "pub_key": {
// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D",
// "type": "ed25519"


+ 4
- 1
rpc/core/pipe.go View File

@ -2,18 +2,21 @@ package core
import (
crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/consensus"
p2p "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/log"
)
//----------------------------------------------
// These interfaces are used by RPC and must be thread safe
type Consensus interface {
GetState() *sm.State
GetValidators() (int, []*types.Validator)
GetRoundState() *consensus.RoundState
}


+ 1
- 1
rpc/core/routes.go View File

@ -18,7 +18,7 @@ var Routes = map[string]*rpc.RPCFunc{
"block": rpc.NewRPCFunc(Block, "height"),
"commit": rpc.NewRPCFunc(Commit, "height"),
"tx": rpc.NewRPCFunc(Tx, "hash,prove"),
"validators": rpc.NewRPCFunc(Validators, ""),
"validators": rpc.NewRPCFunc(Validators, "height"),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, ""),
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),


+ 3
- 4
rpc/lib/client/http_client.go View File

@ -75,7 +75,7 @@ func NewJSONRPCClient(remote string) *JSONRPCClient {
}
func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) {
request, err := types.MapToRequest("", method, params)
request, err := types.MapToRequest("jsonrpc-client", method, params)
if err != nil {
return nil, err
}
@ -146,9 +146,8 @@ func unmarshalResponseBytes(responseBytes []byte, result interface{}) (interface
if err != nil {
return nil, errors.Errorf("Error unmarshalling rpc response: %v", err)
}
errorStr := response.Error
if errorStr != "" {
return nil, errors.Errorf("Response error: %v", errorStr)
if response.Error != nil {
return nil, errors.Errorf("Response error: %v", response.Error.Message)
}
// unmarshal the RawMessage into the result
err = json.Unmarshal(*response.Result, result)
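
With `RPCResponse.Error` now a structured `*RPCError` instead of a string, callers check for nil and can surface the code and data separately. A minimal sketch:

```go
if response.Error != nil {
	// Code/Message/Data per the JSON-RPC 2.0 error object
	// (see rpc/lib/types/types.go below).
	return nil, errors.Errorf("Response error: %d - %s: %s",
		response.Error.Code, response.Error.Message, response.Error.Data)
}
```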


+ 4
- 4
rpc/lib/client/ws_client.go View File

@ -195,7 +195,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error {
// Call the given method. See Send description.
func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error {
request, err := types.MapToRequest("", method, params)
request, err := types.MapToRequest("ws-client", method, params)
if err != nil {
return err
}
@ -205,7 +205,7 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in
// CallWithArrayParams the given method with params in a form of array. See
// Send description.
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error {
request, err := types.ArrayToRequest("", method, params)
request, err := types.ArrayToRequest("ws-client", method, params)
if err != nil {
return err
}
@ -422,8 +422,8 @@ func (c *WSClient) readRoutine() {
c.ErrorsCh <- err
continue
}
if response.Error != "" {
c.ErrorsCh <- errors.Errorf(response.Error)
if response.Error != nil {
c.ErrorsCh <- errors.New(response.Error.Message)
continue
}
c.Logger.Info("got response", "resp", response.Result)


+ 27
- 21
rpc/lib/server/handlers.go View File

@ -110,35 +110,36 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
var request types.RPCRequest
err := json.Unmarshal(b, &request)
if err != nil {
WriteRPCResponseHTTPError(w, http.StatusBadRequest, types.NewRPCResponse("", nil, fmt.Sprintf("Error unmarshalling request: %v", err.Error())))
WriteRPCResponseHTTP(w, types.RPCParseError("", errors.Wrap(err, "Error unmarshalling request")))
return
}
if len(r.URL.Path) > 1 {
WriteRPCResponseHTTPError(w, http.StatusNotFound, types.NewRPCResponse(request.ID, nil, fmt.Sprintf("Invalid JSONRPC endpoint %s", r.URL.Path)))
// A Notification is a Request object without an "id" member.
// The Server MUST NOT reply to a Notification, including those that are within a batch request.
if request.ID == "" {
return
}
rpcFunc := funcMap[request.Method]
if rpcFunc == nil {
WriteRPCResponseHTTPError(w, http.StatusNotFound, types.NewRPCResponse(request.ID, nil, "RPC method unknown: "+request.Method))
if len(r.URL.Path) > 1 {
WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path)))
return
}
if rpcFunc.ws {
WriteRPCResponseHTTPError(w, http.StatusMethodNotAllowed, types.NewRPCResponse(request.ID, nil, "RPC method is only for websockets: "+request.Method))
rpcFunc := funcMap[request.Method]
if rpcFunc == nil || rpcFunc.ws {
WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID))
return
}
args, err := jsonParamsToArgsRPC(rpcFunc, request.Params)
if err != nil {
WriteRPCResponseHTTPError(w, http.StatusBadRequest, types.NewRPCResponse(request.ID, nil, fmt.Sprintf("Error converting json params to arguments: %v", err.Error())))
WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments")))
return
}
returns := rpcFunc.f.Call(args)
logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns)
result, err := unreflectResult(returns)
if err != nil {
WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.NewRPCResponse(request.ID, result, err.Error()))
WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err))
return
}
WriteRPCResponseHTTP(w, types.NewRPCResponse(request.ID, result, ""))
WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(request.ID, result))
}
}
@ -229,7 +230,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
// Exception for websocket endpoints
if rpcFunc.ws {
return func(w http.ResponseWriter, r *http.Request) {
WriteRPCResponseHTTPError(w, http.StatusMethodNotAllowed, types.NewRPCResponse("", nil, "This RPC method is only for websockets"))
WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(""))
}
}
// All other endpoints
@ -237,17 +238,17 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
logger.Debug("HTTP HANDLER", "req", r)
args, err := httpParamsToArgs(rpcFunc, r)
if err != nil {
WriteRPCResponseHTTPError(w, http.StatusBadRequest, types.NewRPCResponse("", nil, fmt.Sprintf("Error converting http params to args: %v", err.Error())))
WriteRPCResponseHTTP(w, types.RPCInvalidParamsError("", errors.Wrap(err, "Error converting http params to arguments")))
return
}
returns := rpcFunc.f.Call(args)
logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns)
result, err := unreflectResult(returns)
if err != nil {
WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.NewRPCResponse("", nil, err.Error()))
WriteRPCResponseHTTP(w, types.RPCInternalError("", err))
return
}
WriteRPCResponseHTTP(w, types.NewRPCResponse("", result, ""))
WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse("", result))
}
}
@ -509,8 +510,13 @@ func (wsc *wsConnection) readRoutine() {
var request types.RPCRequest
err = json.Unmarshal(in, &request)
if err != nil {
errStr := fmt.Sprintf("Error unmarshaling data: %s", err.Error())
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, nil, errStr))
wsc.WriteRPCResponse(types.RPCParseError("", errors.Wrap(err, "Error unmarshaling request")))
continue
}
// A Notification is a Request object without an "id" member.
// The Server MUST NOT reply to a Notification, including those that are within a batch request.
if request.ID == "" {
continue
}
@ -518,7 +524,7 @@ func (wsc *wsConnection) readRoutine() {
rpcFunc := wsc.funcMap[request.Method]
if rpcFunc == nil {
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, nil, "RPC method unknown: "+request.Method))
wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID))
continue
}
var args []reflect.Value
@ -529,7 +535,7 @@ func (wsc *wsConnection) readRoutine() {
args, err = jsonParamsToArgsRPC(rpcFunc, request.Params)
}
if err != nil {
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, nil, err.Error()))
wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments")))
continue
}
returns := rpcFunc.f.Call(args)
@ -539,10 +545,10 @@ func (wsc *wsConnection) readRoutine() {
result, err := unreflectResult(returns)
if err != nil {
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, nil, err.Error()))
wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err))
continue
} else {
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, result, ""))
wsc.WriteRPCResponse(types.NewRPCSuccessResponse(request.ID, result))
continue
}
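
Both the HTTP and websocket handlers now enforce the JSON-RPC 2.0 notification rule quoted in the comments: a request without an "id" is a notification and receives no reply at all. Illustrative payloads (the method name is an example):

```go
// Answered with exactly one response:
const call = `{"jsonrpc":"2.0","id":"1","method":"status","params":{}}`

// Silently dropped — no success, no error:
const notification = `{"jsonrpc":"2.0","method":"status","params":{}}`
```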


+ 2
- 1
rpc/lib/server/http_server.go View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/pkg/errors"
types "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tmlibs/log"
)
@ -99,7 +100,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
// For the rest,
logger.Error("Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()))
rww.WriteHeader(http.StatusInternalServerError)
WriteRPCResponseHTTP(rww, types.NewRPCResponse("", nil, fmt.Sprintf("Internal Server Error: %v", e)))
WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error)))
}
}


+ 54
- 13
rpc/lib/types/types.go View File

@ -5,9 +5,14 @@ import (
"fmt"
"strings"
"github.com/pkg/errors"
events "github.com/tendermint/tmlibs/events"
)
//----------------------------------------
// REQUEST
type RPCRequest struct {
JSONRPC string `json:"jsonrpc"`
ID string `json:"id"`
@ -47,42 +52,77 @@ func ArrayToRequest(id string, method string, params []interface{}) (RPCRequest,
}
//----------------------------------------
// RESPONSE
type RPCError struct {
Code int `json:"code"`
Message string `json:"message"`
Data string `json:"data,omitempty"`
}
type RPCResponse struct {
JSONRPC string `json:"jsonrpc"`
ID string `json:"id"`
Result *json.RawMessage `json:"result"`
Error string `json:"error"`
Result *json.RawMessage `json:"result,omitempty"`
Error *RPCError `json:"error,omitempty"`
}
func NewRPCResponse(id string, res interface{}, err string) RPCResponse {
func NewRPCSuccessResponse(id string, res interface{}) RPCResponse {
var raw *json.RawMessage
if res != nil {
var js []byte
js, err2 := json.Marshal(res)
if err2 == nil {
rawMsg := json.RawMessage(js)
raw = &rawMsg
} else {
err = err2.Error()
js, err := json.Marshal(res)
if err != nil {
return RPCInternalError(id, errors.Wrap(err, "Error marshalling response"))
}
rawMsg := json.RawMessage(js)
raw = &rawMsg
}
return RPCResponse{JSONRPC: "2.0", ID: id, Result: raw}
}
func NewRPCErrorResponse(id string, code int, msg string, data string) RPCResponse {
return RPCResponse{
JSONRPC: "2.0",
ID: id,
Result: raw,
Error: err,
Error: &RPCError{Code: code, Message: msg, Data: data},
}
}
func (resp RPCResponse) String() string {
if resp.Error == "" {
if resp.Error == nil {
return fmt.Sprintf("[%s %v]", resp.ID, resp.Result)
} else {
return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
}
}
func RPCParseError(id string, err error) RPCResponse {
return NewRPCErrorResponse(id, -32700, "Parse error. Invalid JSON", err.Error())
}
func RPCInvalidRequestError(id string, err error) RPCResponse {
return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error())
}
func RPCMethodNotFoundError(id string) RPCResponse {
return NewRPCErrorResponse(id, -32601, "Method not found", "")
}
func RPCInvalidParamsError(id string, err error) RPCResponse {
return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error())
}
func RPCInternalError(id string, err error) RPCResponse {
return NewRPCErrorResponse(id, -32603, "Internal error", err.Error())
}
func RPCServerError(id string, err error) RPCResponse {
return NewRPCErrorResponse(id, -32000, "Server error", err.Error())
}
//----------------------------------------
// *wsConnection implements this interface.
@ -100,10 +140,11 @@ type WSRPCContext struct {
}
//----------------------------------------
// sockets
// SOCKETS
//
// Determine if it's a unix or tcp socket.
// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port
// TODO: deprecate
func SocketType(listenAddr string) string {
socketType := "unix"
if len(strings.Split(listenAddr, ":")) >= 2 {


+ 32
- 0
rpc/lib/types/types_test.go View File

@ -0,0 +1,32 @@
package rpctypes
import (
"encoding/json"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
type SampleResult struct {
Value string
}
func TestResponses(t *testing.T) {
assert := assert.New(t)
a := NewRPCSuccessResponse("1", &SampleResult{"hello"})
b, _ := json.Marshal(a)
s := `{"jsonrpc":"2.0","id":"1","result":{"Value":"hello"}}`
assert.Equal(string(s), string(b))
d := RPCParseError("1", errors.New("Hello world"))
e, _ := json.Marshal(d)
f := `{"jsonrpc":"2.0","id":"1","error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}`
assert.Equal(string(f), string(e))
g := RPCMethodNotFoundError("2")
h, _ := json.Marshal(g)
i := `{"jsonrpc":"2.0","id":"2","error":{"code":-32601,"message":"Method not found"}}`
assert.Equal(string(h), string(i))
}

Some files were not shown because too many files changed in this diff
